In [2]:
%matplotlib inline
import pandas as pd 
import numpy as np
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.formula.api import ols
from scipy import stats
In [3]:
# Semicolon-separated export; Latin-1 encoding because company/artist
# names contain accented characters (e.g. "Arte Francés").
data = pd.read_csv("playlists.csv", sep=";", encoding = "ISO-8859-1") 
In [4]:
# Overview of all 64 columns; include="all" adds count/unique/top/freq
# statistics for the non-numeric columns as well.
data.describe(include="all")
Out[4]:
company playlist_sample namesfiles no artist song sampleratefiles totalsamplesfiles durationfiles bitratefiles ... chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 attackslopefiles attackleapfiles
count 1782 1782.000000 1782 1782.000000 1782 1782 1782.0 1.782000e+03 1782.000000 1782.000000 ... 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000
unique 6 NaN 515 NaN 353 443 NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
top Arte Francés NaN 06 - Goldfish - We Come Together (Fishybeat Mi... NaN Satin Jackets Mirage.mp3 ... NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
freq 441 NaN 6 NaN 51 12 NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
mean NaN 2.084175 NaN 17.116162 NaN NaN 44100.0 1.043632e+07 236.651237 252.336700 ... 0.332301 0.319191 0.265246 0.440462 0.549565 0.581967 0.477825 0.430522 15.804409 0.507503
std NaN 1.114796 NaN 11.837401 NaN NaN 0.0 3.227105e+06 73.176981 88.377597 ... 0.270616 0.263919 0.249612 0.290454 0.314771 0.323173 0.321646 0.295563 9.338659 0.247587
min NaN 1.000000 NaN 1.000000 NaN NaN 44100.0 5.965054e+06 135.262000 128.000000 ... 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.168304
25% NaN 1.000000 NaN 8.000000 NaN NaN 44100.0 8.353151e+06 189.413850 128.000000 ... 0.116591 0.109123 0.075378 0.204738 0.301961 0.316822 0.214680 0.203097 9.810711 0.285584
50% NaN 2.000000 NaN 15.500000 NaN NaN 44100.0 9.480378e+06 214.974562 320.000000 ... 0.262799 0.259887 0.183082 0.396861 0.520957 0.570088 0.418077 0.379737 14.833864 0.452456
75% NaN 3.000000 NaN 24.000000 NaN NaN 44100.0 1.146931e+07 260.075075 320.000000 ... 0.494897 0.469603 0.384485 0.642814 0.829318 0.918554 0.735384 0.616558 19.964413 0.730669
max NaN 5.000000 NaN 65.000000 NaN NaN 44100.0 2.843136e+07 644.702000 320.000000 ... 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 66.233620 0.999408

11 rows × 64 columns

Find the positive and negative songs of the selection process for every company.

In [5]:
# For every company, split the songs into:
#   positives — songs present in the LAST playlist sample (they survived
#               the whole selection process), and
#   negatives — songs from earlier samples that never reached the last one.
companies = data['company'].unique()
by_company = [data[data.company == company] for company in companies]
positives = []
negatives = []
for data_com in by_company:
    data_com = data_com.sort_values('playlist_sample')
    # .iloc[-1] instead of int(tail(1)): calling int() on a Series is
    # deprecated and raises on pandas >= 2.x.
    last_pl = int(data_com['playlist_sample'].iloc[-1])
    df_last_pl = data_com[data_com.playlist_sample == last_pl]
    positives.append(df_last_pl)
    # Collect negative rows in a plain list and build the frame once at the
    # end: growing a DataFrame row-by-row is quadratic, and DataFrame.append
    # was removed in pandas 2.0.
    neg_rows = []
    for index, row in data_com[data_com.playlist_sample < last_pl].iterrows():
        # A song is negative only if no (artist, song) match exists in the
        # last sample.
        in_last = ((df_last_pl['artist'] == row['artist']) &
                   (df_last_pl['song'] == row['song'])).any()
        if not in_last:
            neg_rows.append(row)
    pos_loc = pd.DataFrame(neg_rows, columns=data_com.columns).reset_index(drop=True)
    negatives.append(pos_loc)
In [6]:
# Label negatives 0 and positives 1, then stack them into one frame per
# company. .copy() prevents the SettingWithCopyWarning raised by the
# original code (negatives/positives hold slices of the source frame), and
# pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
df_n_ps = []
for i in range(len(negatives)):
    neg = negatives[i].copy()
    pos = positives[i].copy()
    neg['chosen'] = 0
    pos['chosen'] = 1
    # Keep the labelled frames in the original lists, matching the
    # side effect of the original in-place assignment.
    negatives[i] = neg
    positives[i] = pos
    df_n_ps.append(pd.concat([neg, pos]))
D:\Usuarios\1144084318\AppData\Roaming\Python\Python37\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  after removing the cwd from sys.path.

ANOVA

In [7]:
import warnings
import matplotlib.pyplot as plt
import math
import seaborn as sns
# NOTE(review): this silences ALL warnings (including the pandas
# SettingWithCopyWarning seen above) — consider filtering by category
# instead so real problems are not hidden.
warnings.filterwarnings('ignore')
In [ ]:
# For each company frame, run Tukey HSD / ANOVA per feature column and plot
# the neg-vs-pos KDE for every feature where the group difference is
# significant AND the ANOVA assumptions (Levene homogeneity, Shapiro
# normality of residuals) hold.
alpha = 0.05
for df_n_p in df_n_ps:
    df_n_p = df_n_p.fillna(0)
    fig = plt.figure(figsize=(17, 200))
    plot_idx = 1
    # Feature columns are 8 .. shape[1]-2 (the last column is 'chosen'),
    # so there are shape[1]-9 candidate features.
    n_features = df_n_p.shape[1] - 9
    for index in range(8, df_n_p.shape[1] - 1):
        name = df_n_p.columns.values[index]
        df_n_p[name] = df_n_p[name].astype('float64')
        mc = MultiComparison(df_n_p[name], df_n_p['chosen'])
        mc_results = mc.tukeyhsd()
        # 'reject' is the public significance flag; the original poked at
        # the private _results_table.data[1:][0][5] for the same value.
        if mc_results.reject[0]:
            results = ols(name + ' ~ C(chosen)', data=df_n_p).fit()
            homogeneity_test = stats.levene(
                df_n_p[name][df_n_p['chosen'] == 0],
                df_n_p[name][df_n_p['chosen'] == 1])[1]
            normality_test = stats.shapiro(results.resid)[1]
            if homogeneity_test > alpha and normality_test > alpha:
                # ceil(n_features / 2) rows of 2 plots. The original wrote
                # math.ceil(df_n_p.shape[1]-9/2), which (by operator
                # precedence) computed shape[1] - 4.5 rows instead of
                # (shape[1]-9)/2 — far too many rows.
                ax = fig.add_subplot(math.ceil(n_features / 2), 2, plot_idx)
                sns.kdeplot(df_n_p.loc[df_n_p.chosen == 0][name], shade=True, ax=ax)
                sns.kdeplot(df_n_p.loc[df_n_p.chosen == 1][name], shade=True, ax=ax)
                plt.title(df_n_p.iloc[0, 0].upper() + " " + name)
                plt.legend(['neg', 'pos'])
                plot_idx += 1

MFCC

In [8]:
from collections import Counter


from sklearn.cluster import KMeans
# NOTE(review): calinski_harabaz_score was renamed to
# calinski_harabasz_score in scikit-learn 0.23 and the old spelling was
# later removed — this import fails on modern sklearn; confirm the pinned
# version before re-running.
from sklearn.metrics import confusion_matrix, accuracy_score, silhouette_samples, silhouette_score, calinski_harabaz_score
from sklearn import preprocessing
from sklearn.decomposition import PCA
In [12]:
# Cast the numeric columns that were read as object dtype to float64,
# for every company frame.
float_cols = ('bitratefiles', 'pitchfiles', 'bestkeyfiles')
for i in range(len(companies)):
    for col in float_cols:
        df_n_ps[i][col] = df_n_ps[i][col].astype('float64')
df_n_ps[0].info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 372 entries, 0 to 179
Data columns (total 65 columns):
company                 372 non-null object
playlist_sample         372 non-null object
namesfiles              372 non-null object
no                      372 non-null object
artist                  372 non-null object
song                    372 non-null object
sampleratefiles         372 non-null object
totalsamplesfiles       372 non-null object
durationfiles           372 non-null float64
bitratefiles            372 non-null float64
rmsfiles                372 non-null float64
rmsmedianfiles          372 non-null float64
lowenergyfiles          372 non-null float64
ASRfiles                372 non-null float64
beatspectrumfiles       372 non-null float64
eventdensityfiles       372 non-null float64
tempofiles              372 non-null float64
pulseclarityfiles       372 non-null float64
zerocrossfiles          372 non-null float64
rolloffsfiles           372 non-null float64
brightnessfiles         372 non-null float64
spreadfiles             372 non-null float64
centroidfiles           371 non-null float64
kurtosisfiles           372 non-null float64
flatnessfiles           372 non-null float64
entropyfiles            372 non-null float64
mfccfiles_1             372 non-null float64
mfccfiles_2             372 non-null float64
mfccfiles_3             372 non-null float64
mfccfiles_4             372 non-null float64
mfccfiles_5             372 non-null float64
mfccfiles_6             372 non-null float64
mfccfiles_7             372 non-null float64
mfccfiles_8             372 non-null float64
mfccfiles_9             372 non-null float64
mfccfiles_10            372 non-null float64
mfccfiles_11            372 non-null float64
mfccfiles_12            372 non-null float64
mfccfiles_13            372 non-null float64
pitchfiles              372 non-null float64
inharmonicityfiles      372 non-null float64
bestkeyfiles            372 non-null float64
keyclarityfiles         372 non-null float64
modalityfiles           372 non-null float64
tonalcentroidfiles_1    372 non-null float64
tonalcentroidfiles_2    372 non-null float64
tonalcentroidfiles_3    372 non-null float64
tonalcentroidfiles_4    372 non-null float64
tonalcentroidfiles_5    372 non-null float64
tonalcentroidfiles_6    372 non-null float64
chromagramfiles_1       372 non-null float64
chromagramfiles_2       372 non-null float64
chromagramfiles_3       372 non-null float64
chromagramfiles_4       372 non-null float64
chromagramfiles_5       372 non-null float64
chromagramfiles_6       372 non-null float64
chromagramfiles_7       372 non-null float64
chromagramfiles_8       372 non-null float64
chromagramfiles_9       372 non-null float64
chromagramfiles_10      372 non-null float64
chromagramfiles_11      372 non-null float64
chromagramfiles_12      372 non-null float64
attackslopefiles        372 non-null float64
attackleapfiles         372 non-null float64
chosen                  372 non-null int64
dtypes: float64(56), int64(1), object(8)
memory usage: 191.8+ KB

Vamos a reemplazar los NaN por 0 y después a normalizar (estandarizar) los datos para que todas las variables tengan la misma importancia. Solo vamos a considerar las columnas numéricas (a partir de la columna 8).

In [13]:
# Replace missing values with 0, then z-score the numeric columns
# (from column 8 onwards) of every company frame.
df_n_ps_std = [0]*len(companies)
for i in range(len(companies)):
    df_n_ps[i] = df_n_ps[i].fillna(0)
    numeric = df_n_ps[i].iloc[:, 8:]
    scaled = preprocessing.scale(numeric)
    df_n_ps_std[i] = pd.DataFrame(scaled, columns=numeric.columns)
# Sanity check: standardized columns should have mean ~0.
df_n_ps_std[0].mean(axis=0)
Out[13]:
durationfiles          -4.261824e-16
bitratefiles            0.000000e+00
rmsfiles                4.303606e-16
rmsmedianfiles         -4.279731e-16
lowenergyfiles         -2.387576e-18
ASRfiles               -2.023471e-16
beatspectrumfiles       2.477111e-16
eventdensityfiles      -7.879002e-17
tempofiles             -3.133694e-17
pulseclarityfiles       3.103849e-17
zerocrossfiles         -2.930750e-16
rolloffsfiles           5.789873e-16
brightnessfiles        -8.356517e-17
spreadfiles            -3.842506e-16
centroidfiles          -2.142850e-16
kurtosisfiles          -6.327077e-17
flatnessfiles           1.366887e-16
entropyfiles            3.516900e-15
mfccfiles_1            -1.921999e-16
mfccfiles_2            -5.372047e-18
mfccfiles_3            -1.178120e-16
mfccfiles_4            -2.648718e-17
mfccfiles_5            -4.655774e-17
mfccfiles_6            -1.193788e-18
mfccfiles_7             6.002516e-17
mfccfiles_8             1.492235e-17
mfccfiles_9             5.133289e-17
mfccfiles_10            2.596489e-17
mfccfiles_11           -3.402296e-17
mfccfiles_12           -4.775153e-18
mfccfiles_13           -4.476706e-18
pitchfiles              0.000000e+00
inharmonicityfiles      2.595743e-15
bestkeyfiles           -8.475896e-17
keyclarityfiles         5.369062e-16
modalityfiles          -3.282918e-17
tonalcentroidfiles_1   -1.522080e-17
tonalcentroidfiles_2   -6.565835e-18
tonalcentroidfiles_3   -9.699529e-18
tonalcentroidfiles_4    1.671303e-17
tonalcentroidfiles_5   -2.193586e-17
tonalcentroidfiles_6    2.059285e-17
chromagramfiles_1      -1.811574e-16
chromagramfiles_2      -4.282715e-17
chromagramfiles_3       4.819920e-17
chromagramfiles_4      -2.188363e-16
chromagramfiles_5      -3.282918e-18
chromagramfiles_6      -8.834033e-17
chromagramfiles_7       3.730588e-17
chromagramfiles_8      -1.140068e-16
chromagramfiles_9      -2.715868e-17
chromagramfiles_10     -6.707597e-17
chromagramfiles_11     -6.051014e-17
chromagramfiles_12      2.148446e-16
attackslopefiles       -4.327482e-17
attackleapfiles        -1.551925e-16
chosen                 -3.068036e-16
dtype: float64
In [14]:
# Sanity check: every scaled column should have std ~= 1. The value
# 1.001347 = sqrt(372/371) appears because sklearn's scale() divides by
# the population std while pandas .std() uses the sample (ddof=1) std.
# Columns with std 0 (bitratefiles, pitchfiles) were constant.
df_n_ps_std[0].std(axis=0)
Out[14]:
durationfiles           1.001347
bitratefiles            0.000000
rmsfiles                1.001347
rmsmedianfiles          1.001347
lowenergyfiles          1.001347
ASRfiles                1.001347
beatspectrumfiles       1.001347
eventdensityfiles       1.001347
tempofiles              1.001347
pulseclarityfiles       1.001347
zerocrossfiles          1.001347
rolloffsfiles           1.001347
brightnessfiles         1.001347
spreadfiles             1.001347
centroidfiles           1.001347
kurtosisfiles           1.001347
flatnessfiles           1.001347
entropyfiles            1.001347
mfccfiles_1             1.001347
mfccfiles_2             1.001347
mfccfiles_3             1.001347
mfccfiles_4             1.001347
mfccfiles_5             1.001347
mfccfiles_6             1.001347
mfccfiles_7             1.001347
mfccfiles_8             1.001347
mfccfiles_9             1.001347
mfccfiles_10            1.001347
mfccfiles_11            1.001347
mfccfiles_12            1.001347
mfccfiles_13            1.001347
pitchfiles              0.000000
inharmonicityfiles      1.001347
bestkeyfiles            1.001347
keyclarityfiles         1.001347
modalityfiles           1.001347
tonalcentroidfiles_1    1.001347
tonalcentroidfiles_2    1.001347
tonalcentroidfiles_3    1.001347
tonalcentroidfiles_4    1.001347
tonalcentroidfiles_5    1.001347
tonalcentroidfiles_6    1.001347
chromagramfiles_1       1.001347
chromagramfiles_2       1.001347
chromagramfiles_3       1.001347
chromagramfiles_4       1.001347
chromagramfiles_5       1.001347
chromagramfiles_6       1.001347
chromagramfiles_7       1.001347
chromagramfiles_8       1.001347
chromagramfiles_9       1.001347
chromagramfiles_10      1.001347
chromagramfiles_11      1.001347
chromagramfiles_12      1.001347
attackslopefiles        1.001347
attackleapfiles         1.001347
chosen                  1.001347
dtype: float64

Borramos pitch y bitrate porque, tras el escalado, todos sus valores quedan en 0 (varianza nula, no aportan información).

In [15]:
# Remove the two zero-variance columns from every standardized frame.
for i in range(len(companies)):
    df_n_ps_std[i] = df_n_ps_std[i].drop(columns=["pitchfiles", "bitratefiles"])
In [16]:
# Confirm pitchfiles and bitratefiles are gone.
df_n_ps_std[0].columns
Out[16]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')
In [17]:
# Verify that positions 17:30 hold exactly the 13 MFCC columns before
# slicing them out below.
df_n_ps_std[0].columns[17:30]
Out[17]:
Index(['mfccfiles_1', 'mfccfiles_2', 'mfccfiles_3', 'mfccfiles_4',
       'mfccfiles_5', 'mfccfiles_6', 'mfccfiles_7', 'mfccfiles_8',
       'mfccfiles_9', 'mfccfiles_10', 'mfccfiles_11', 'mfccfiles_12',
       'mfccfiles_13'],
      dtype='object')
In [18]:
# Extract the 13 MFCC columns from each standardized frame. Selecting by
# column-name prefix is robust to column reordering/removal, unlike the
# original hard-coded iloc[:, 17:30] (which selected mfccfiles_1..13 — see
# the check in Out[17]). The original pd.DataFrame(...) wrap plus column
# reassignment was redundant: the slice already carries the right columns.
df_n_ps_std_mfcc = [None]*len(companies)
for i in range(len(companies)):
    mfcc_cols = [c for c in df_n_ps_std[i].columns if c.startswith('mfccfiles_')]
    df_n_ps_std_mfcc[i] = df_n_ps_std[i][mfcc_cols].copy()
df_n_ps_std_mfcc[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 372 entries, 0 to 371
Data columns (total 13 columns):
mfccfiles_1     372 non-null float64
mfccfiles_2     372 non-null float64
mfccfiles_3     372 non-null float64
mfccfiles_4     372 non-null float64
mfccfiles_5     372 non-null float64
mfccfiles_6     372 non-null float64
mfccfiles_7     372 non-null float64
mfccfiles_8     372 non-null float64
mfccfiles_9     372 non-null float64
mfccfiles_10    372 non-null float64
mfccfiles_11    372 non-null float64
mfccfiles_12    372 non-null float64
mfccfiles_13    372 non-null float64
dtypes: float64(13)
memory usage: 37.9 KB

Arte Francés

ANN

In [9]:
import keras
# Record the Keras version the results below were produced with (2.3.0).
keras.__version__
Using TensorFlow backend.
Out[9]:
'2.3.0'
In [60]:
from keras.layers import Input, Flatten, Dense#, Lambda
from keras.models import Model
from keras import layers
from keras import models, optimizers

from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV #permite buscar la mejor configuración de parámetros con C-V
from sklearn.metrics import make_scorer # permite crear una clase scorer a partir de una función de score (necesario para el kappa)
from sklearn.metrics import accuracy_score, cohen_kappa_score, classification_report, roc_auc_score
from sklearn.model_selection import train_test_split #metodo de particionamiento de datasets para evaluación
from sklearn.preprocessing import StandardScaler
In [16]:
# Features: the scaled MFCC coefficients of the first company.
X = df_n_ps_std_mfcc[0]
In [17]:
# Target: 1 = song reached the last playlist sample, 0 = dropped out.
# Taken from the UNscaled frame — the 'chosen' column in df_n_ps_std was
# standardized and is no longer 0/1.
y = df_n_ps[0]['chosen']
In [18]:
# Hold out the default 25% for validation. random_state pins the split so
# the accuracy/kappa numbers below are reproducible across kernel
# restarts (the original split changed on every run).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [19]:
# 279 training rows x 13 MFCC features (75% of the 372 songs).
X_train.shape
Out[19]:
(279, 13)
In [24]:
# Base estimator for the grid search; hidden_layer_sizes here is only a
# placeholder — it is overridden by the parameter grid below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [25]:
# Candidate hyperparameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the search to keep the grid tractable
# (see the commented-out entry in the next cell).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [26]:
import time
start = time.time()  # wall-clock start, to report total tuning time at the end

np.random.seed(1234)  # seed the global RNG used for MLP weight initialization
parametros = {'activation': activation_vec,
              'max_iter': max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both kappa and accuracy per fold; refit='accuracy' refits the best
# (by accuracy) configuration on the whole training set.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE: the deprecated iid=True argument was dropped — the parameter was
# deprecated in scikit-learn 0.22 and removed in 0.24; modern versions
# always average scores across folds (nearly equivalent with balanced folds).
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [27]:
# Run the search, then report the winning configuration, its CV accuracy,
# the matching kappa, and the total wall-clock time.
grid.fit(X_train, y_train)

best_kappa = grid.cv_results_['mean_test_kappa'][grid.best_index_]
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_ * 100, best_kappa * 100))
end = time.time()
print("Tiempo total: {0:.2f} minutos".format((end - start) / 60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.006, 'max_iter': 300}, que permiten obtener un Accuracy de 82.08% y un Kappa del 43.49
Tiempo total: 29.12 minutos
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (300) reached and the optimization hasn't converged yet.
  % self.max_iter, ConvergenceWarning)
In [35]:
n0 = X_train.shape[1]  # input dimension (13 MFCC features)
# Hidden-layer sizes of the grid-search winner, plus the single output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
# Take the learning rate and epoch budget from the grid-search winner
# instead of hard-coding 0.006 / 300 (the values of this particular run):
# re-running the search can no longer leave these constants out of sync.
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [36]:
# Functional-API input layer matching the 13 MFCC features.
input_tensor = Input(shape = (n0,))
In [37]:
# Chain the tanh hidden layers (mirroring the sklearn winner), then add
# the single sigmoid output unit for binary classification.
prev = input_tensor
hidden_outputs = [prev]
for size in ns[:-1]:
    prev = Dense(size, activation='tanh')(prev)
    hidden_outputs.append(prev)

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [38]:
# Build the model and snapshot its freshly-initialized weights so training
# can later be restarted from the exact same starting point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [39]:
# Inspect the architecture: 13 -> 20 (tanh) -> 1 (sigmoid), 301 parameters.
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_3 (Dense)              (None, 20)                280       
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 21        
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
_________________________________________________________________
In [40]:
# Reset to the pre-training weights so this cell is idempotent across
# re-runs (model.fit would otherwise continue from the previous state).
model.set_weights(weights)
# 'learning_rate' replaces the deprecated 'lr' keyword (Keras >= 2.3,
# the version recorded above).
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever validation accuracy plateaus for 10
# epochs (improvement smaller than min_delta=0.01).
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 279 samples, validate on 93 samples
Epoch 1/300
279/279 [==============================] - 1s 3ms/step - loss: 0.7063 - accuracy: 0.5197 - val_loss: 0.6660 - val_accuracy: 0.5914
Epoch 2/300
279/279 [==============================] - 0s 61us/step - loss: 0.6003 - accuracy: 0.6918 - val_loss: 0.5741 - val_accuracy: 0.7419
Epoch 3/300
279/279 [==============================] - 0s 68us/step - loss: 0.5317 - accuracy: 0.7348 - val_loss: 0.5278 - val_accuracy: 0.7527
Epoch 4/300
279/279 [==============================] - 0s 72us/step - loss: 0.4996 - accuracy: 0.7634 - val_loss: 0.5053 - val_accuracy: 0.7742
Epoch 5/300
279/279 [==============================] - 0s 75us/step - loss: 0.4770 - accuracy: 0.7778 - val_loss: 0.4938 - val_accuracy: 0.8065
Epoch 6/300
279/279 [==============================] - 0s 90us/step - loss: 0.4639 - accuracy: 0.7778 - val_loss: 0.4845 - val_accuracy: 0.7957
Epoch 7/300
279/279 [==============================] - 0s 82us/step - loss: 0.4526 - accuracy: 0.7849 - val_loss: 0.4813 - val_accuracy: 0.8172
Epoch 8/300
279/279 [==============================] - 0s 75us/step - loss: 0.4466 - accuracy: 0.7921 - val_loss: 0.4822 - val_accuracy: 0.8065
Epoch 9/300
279/279 [==============================] - 0s 79us/step - loss: 0.4412 - accuracy: 0.7993 - val_loss: 0.4819 - val_accuracy: 0.8172
Epoch 10/300
279/279 [==============================] - 0s 82us/step - loss: 0.4373 - accuracy: 0.8065 - val_loss: 0.4840 - val_accuracy: 0.7957
Epoch 11/300
279/279 [==============================] - 0s 86us/step - loss: 0.4343 - accuracy: 0.8136 - val_loss: 0.4827 - val_accuracy: 0.7957
Epoch 12/300
279/279 [==============================] - 0s 75us/step - loss: 0.4328 - accuracy: 0.8100 - val_loss: 0.4866 - val_accuracy: 0.7849
Epoch 13/300
279/279 [==============================] - 0s 82us/step - loss: 0.4225 - accuracy: 0.8136 - val_loss: 0.4860 - val_accuracy: 0.7957
Epoch 14/300
279/279 [==============================] - 0s 75us/step - loss: 0.4186 - accuracy: 0.8136 - val_loss: 0.4865 - val_accuracy: 0.7849
Epoch 15/300
279/279 [==============================] - 0s 82us/step - loss: 0.4118 - accuracy: 0.8136 - val_loss: 0.4846 - val_accuracy: 0.7849
Epoch 16/300
279/279 [==============================] - 0s 79us/step - loss: 0.4080 - accuracy: 0.8208 - val_loss: 0.4901 - val_accuracy: 0.7849
Epoch 17/300
279/279 [==============================] - 0s 107us/step - loss: 0.4009 - accuracy: 0.8351 - val_loss: 0.4878 - val_accuracy: 0.7742

Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.003000000026077032.
Epoch 18/300
279/279 [==============================] - 0s 107us/step - loss: 0.3950 - accuracy: 0.8387 - val_loss: 0.4864 - val_accuracy: 0.7742
Epoch 19/300
279/279 [==============================] - 0s 68us/step - loss: 0.3922 - accuracy: 0.8387 - val_loss: 0.4852 - val_accuracy: 0.7742
Epoch 20/300
279/279 [==============================] - 0s 86us/step - loss: 0.3886 - accuracy: 0.8530 - val_loss: 0.4807 - val_accuracy: 0.7849
Epoch 21/300
279/279 [==============================] - 0s 64us/step - loss: 0.3865 - accuracy: 0.8566 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 22/300
279/279 [==============================] - 0s 82us/step - loss: 0.3828 - accuracy: 0.8530 - val_loss: 0.4848 - val_accuracy: 0.7849
Epoch 23/300
279/279 [==============================] - 0s 79us/step - loss: 0.3800 - accuracy: 0.8566 - val_loss: 0.4855 - val_accuracy: 0.7849
Epoch 24/300
279/279 [==============================] - 0s 68us/step - loss: 0.3760 - accuracy: 0.8638 - val_loss: 0.4818 - val_accuracy: 0.7849
Epoch 25/300
279/279 [==============================] - 0s 79us/step - loss: 0.3744 - accuracy: 0.8602 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 26/300
279/279 [==============================] - 0s 86us/step - loss: 0.3705 - accuracy: 0.8710 - val_loss: 0.4781 - val_accuracy: 0.7849
Epoch 27/300
279/279 [==============================] - 0s 86us/step - loss: 0.3666 - accuracy: 0.8746 - val_loss: 0.4785 - val_accuracy: 0.7849

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 28/300
279/279 [==============================] - 0s 79us/step - loss: 0.3637 - accuracy: 0.8746 - val_loss: 0.4799 - val_accuracy: 0.7849
Epoch 29/300
279/279 [==============================] - 0s 75us/step - loss: 0.3620 - accuracy: 0.8746 - val_loss: 0.4816 - val_accuracy: 0.7849
Epoch 30/300
279/279 [==============================] - 0s 79us/step - loss: 0.3604 - accuracy: 0.8746 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 31/300
279/279 [==============================] - 0s 75us/step - loss: 0.3596 - accuracy: 0.8746 - val_loss: 0.4831 - val_accuracy: 0.7849
Epoch 32/300
279/279 [==============================] - 0s 79us/step - loss: 0.3572 - accuracy: 0.8781 - val_loss: 0.4832 - val_accuracy: 0.7849
Epoch 33/300
279/279 [==============================] - 0s 79us/step - loss: 0.3555 - accuracy: 0.8781 - val_loss: 0.4817 - val_accuracy: 0.7849
Epoch 34/300
279/279 [==============================] - 0s 79us/step - loss: 0.3540 - accuracy: 0.8817 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 35/300
279/279 [==============================] - 0s 82us/step - loss: 0.3527 - accuracy: 0.8817 - val_loss: 0.4817 - val_accuracy: 0.7849
Epoch 36/300
279/279 [==============================] - 0s 75us/step - loss: 0.3510 - accuracy: 0.8817 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 37/300
279/279 [==============================] - 0s 104us/step - loss: 0.3493 - accuracy: 0.8781 - val_loss: 0.4836 - val_accuracy: 0.7849

Epoch 00037: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 38/300
279/279 [==============================] - 0s 86us/step - loss: 0.3476 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 39/300
279/279 [==============================] - 0s 82us/step - loss: 0.3469 - accuracy: 0.8817 - val_loss: 0.4823 - val_accuracy: 0.7849
Epoch 40/300
279/279 [==============================] - 0s 75us/step - loss: 0.3457 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 41/300
279/279 [==============================] - 0s 82us/step - loss: 0.3449 - accuracy: 0.8817 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 42/300
279/279 [==============================] - 0s 90us/step - loss: 0.3443 - accuracy: 0.8817 - val_loss: 0.4830 - val_accuracy: 0.7849
Epoch 43/300
279/279 [==============================] - 0s 104us/step - loss: 0.3434 - accuracy: 0.8817 - val_loss: 0.4833 - val_accuracy: 0.7849
Epoch 44/300
279/279 [==============================] - 0s 93us/step - loss: 0.3427 - accuracy: 0.8817 - val_loss: 0.4835 - val_accuracy: 0.7849
Epoch 45/300
279/279 [==============================] - 0s 90us/step - loss: 0.3418 - accuracy: 0.8817 - val_loss: 0.4831 - val_accuracy: 0.7849
Epoch 46/300
279/279 [==============================] - 0s 93us/step - loss: 0.3411 - accuracy: 0.8817 - val_loss: 0.4832 - val_accuracy: 0.7849
Epoch 47/300
279/279 [==============================] - 0s 97us/step - loss: 0.3403 - accuracy: 0.8817 - val_loss: 0.4832 - val_accuracy: 0.7849

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 48/300
279/279 [==============================] - 0s 111us/step - loss: 0.3394 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 49/300
279/279 [==============================] - 0s 100us/step - loss: 0.3389 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 50/300
279/279 [==============================] - 0s 111us/step - loss: 0.3386 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 51/300
279/279 [==============================] - 0s 115us/step - loss: 0.3381 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 52/300
279/279 [==============================] - 0s 136us/step - loss: 0.3378 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 53/300
279/279 [==============================] - 0s 118us/step - loss: 0.3373 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 54/300
279/279 [==============================] - 0s 86us/step - loss: 0.3370 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 55/300
279/279 [==============================] - 0s 115us/step - loss: 0.3365 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 56/300
279/279 [==============================] - 0s 107us/step - loss: 0.3362 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 57/300
279/279 [==============================] - 0s 122us/step - loss: 0.3358 - accuracy: 0.8853 - val_loss: 0.4830 - val_accuracy: 0.7849

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 58/300
279/279 [==============================] - 0s 104us/step - loss: 0.3353 - accuracy: 0.8853 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 59/300
279/279 [==============================] - 0s 107us/step - loss: 0.3351 - accuracy: 0.8853 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 60/300
279/279 [==============================] - 0s 104us/step - loss: 0.3349 - accuracy: 0.8853 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 61/300
279/279 [==============================] - 0s 104us/step - loss: 0.3347 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 62/300
279/279 [==============================] - 0s 100us/step - loss: 0.3345 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 63/300
279/279 [==============================] - 0s 79us/step - loss: 0.3343 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 64/300
279/279 [==============================] - 0s 90us/step - loss: 0.3341 - accuracy: 0.8889 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 65/300
279/279 [==============================] - 0s 93us/step - loss: 0.3339 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 66/300
279/279 [==============================] - 0s 104us/step - loss: 0.3337 - accuracy: 0.8889 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 67/300
279/279 [==============================] - 0s 79us/step - loss: 0.3335 - accuracy: 0.8889 - val_loss: 0.4826 - val_accuracy: 0.7849

Epoch 00067: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 68/300
279/279 [==============================] - 0s 100us/step - loss: 0.3333 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 69/300
279/279 [==============================] - 0s 104us/step - loss: 0.3332 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 70/300
279/279 [==============================] - 0s 90us/step - loss: 0.3331 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 71/300
279/279 [==============================] - 0s 100us/step - loss: 0.3330 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 72/300
279/279 [==============================] - 0s 107us/step - loss: 0.3329 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 73/300
279/279 [==============================] - 0s 104us/step - loss: 0.3328 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 74/300
279/279 [==============================] - 0s 118us/step - loss: 0.3327 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 75/300
279/279 [==============================] - 0s 100us/step - loss: 0.3326 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 76/300
279/279 [==============================] - 0s 90us/step - loss: 0.3325 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 77/300
279/279 [==============================] - 0s 72us/step - loss: 0.3324 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00077: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 78/300
279/279 [==============================] - 0s 111us/step - loss: 0.3323 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 79/300
279/279 [==============================] - 0s 97us/step - loss: 0.3322 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 80/300
279/279 [==============================] - 0s 97us/step - loss: 0.3322 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 81/300
279/279 [==============================] - 0s 97us/step - loss: 0.3321 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 82/300
279/279 [==============================] - 0s 104us/step - loss: 0.3321 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 83/300
279/279 [==============================] - 0s 86us/step - loss: 0.3320 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 84/300
279/279 [==============================] - 0s 86us/step - loss: 0.3320 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 85/300
279/279 [==============================] - 0s 111us/step - loss: 0.3319 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 86/300
279/279 [==============================] - 0s 93us/step - loss: 0.3319 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 87/300
279/279 [==============================] - 0s 90us/step - loss: 0.3318 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00087: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 88/300
279/279 [==============================] - 0s 90us/step - loss: 0.3318 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 89/300
279/279 [==============================] - 0s 104us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 90/300
279/279 [==============================] - 0s 100us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 91/300
279/279 [==============================] - 0s 93us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 92/300
279/279 [==============================] - 0s 93us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 93/300
279/279 [==============================] - 0s 93us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 94/300
279/279 [==============================] - 0s 100us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 95/300
279/279 [==============================] - 0s 90us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 96/300
279/279 [==============================] - 0s 97us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 97/300
279/279 [==============================] - 0s 100us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00097: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 98/300
279/279 [==============================] - 0s 97us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 99/300
279/279 [==============================] - ETA: 0s - loss: 0.3474 - accuracy: 0.81 - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 100/300
279/279 [==============================] - ETA: 0s - loss: 0.2551 - accuracy: 0.93 - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 101/300
279/279 [==============================] - 0s 107us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 102/300
279/279 [==============================] - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 103/300
279/279 [==============================] - 0s 86us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 104/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 105/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 106/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 107/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00107: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 108/300
279/279 [==============================] - 0s 100us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 109/300
279/279 [==============================] - ETA: 0s - loss: 0.2823 - accuracy: 0.90 - 0s 100us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 110/300
279/279 [==============================] - 0s 90us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 111/300
279/279 [==============================] - 0s 72us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 112/300
279/279 [==============================] - 0s 107us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 113/300
279/279 [==============================] - 0s 86us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 114/300
279/279 [==============================] - 0s 72us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 115/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 116/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 117/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00117: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 118/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 119/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 120/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 121/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 122/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 123/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 124/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 125/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 126/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 127/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00127: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 128/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 129/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 130/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 131/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 132/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 133/300
279/279 [==============================] - 0s 125us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 134/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 135/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 136/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 137/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00137: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 138/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 139/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 140/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 141/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 142/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 143/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 144/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 145/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 146/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 147/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00147: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 148/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 149/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 150/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 151/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 152/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 153/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 154/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 155/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 156/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 157/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00157: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 158/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 159/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 160/300
279/279 [==============================] - 0s 133us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 161/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 162/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 163/300
279/279 [==============================] - 0s 68us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 164/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 165/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 166/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 167/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00167: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 168/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 169/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 170/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 171/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 172/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 173/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 174/300
279/279 [==============================] - 0s 168us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 175/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 176/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 177/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00177: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 178/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 179/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 180/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 181/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 182/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 183/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 184/300
279/279 [==============================] - ETA: 0s - loss: 0.2749 - accuracy: 0.96 - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 185/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 186/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 187/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00187: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 188/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 189/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 190/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 191/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 192/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 193/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 194/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 195/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 196/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 197/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00197: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 198/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 199/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 200/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 201/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 202/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 203/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 204/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 205/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 206/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 207/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00207: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 208/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 209/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 210/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 211/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 212/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 213/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 214/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 215/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 216/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 217/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00217: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 218/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 219/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 220/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 221/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 222/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 223/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 224/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 225/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 226/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 227/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00227: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 228/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 229/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 230/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 231/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 232/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 233/300
279/279 [==============================] - 0s 122us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 234/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 235/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 236/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 237/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00237: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 238/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 239/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 240/300
279/279 [==============================] - 0s 68us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 241/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 242/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 243/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 244/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 245/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 246/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 247/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00247: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 248/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 249/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 250/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 251/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 252/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 253/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 254/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 255/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 256/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 257/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00257: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 258/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 259/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 260/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 261/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 262/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 263/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 264/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 265/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 266/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 267/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00267: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 268/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 269/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 270/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 271/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 272/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 273/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 274/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 275/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 276/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 277/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00277: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 278/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 279/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 280/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 281/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 282/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 283/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 284/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 285/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 286/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 287/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00287: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 288/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 289/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 290/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 291/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 292/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 293/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 294/300
279/279 [==============================] - 0s 118us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 295/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 296/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 297/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00297: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 298/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 299/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 300/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
In [43]:
# Extract the per-epoch training curves recorded by Keras during model.fit.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-axis point per completed epoch.
epochs = range(len(acc))

# Accuracy: dots for training, solid line for validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

# Loss curves on a separate figure.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 300)
In [44]:
# Score the trained network on the held-out test split.
eval_results = model.evaluate(X_test, y_test)
test_loss, test_acc = eval_results
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
93/93 [==============================] - 0s 54us/step
test loss: 0.48277782432494626, test accuracy: 0.7849462628364563
In [58]:
# Binarize the sigmoid outputs at the conventional 0.5 threshold, then report
# agreement metrics against the true labels.
y_pred = model.predict(X_test)
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
Out[58]:
0.34275618374558303

KMeans

In [59]:
X
Out[59]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.303210 1.253016 -0.394054 0.592598 0.106623 -2.083256 0.858313 0.801936 2.380580 -1.304258 0.556361 1.949530 -1.046692
1 0.647559 -1.553511 -1.648243 -1.010792 -0.857927 0.335856 0.884468 0.250703 0.298648 0.683922 1.599907 0.349480 -0.484103
2 2.266625 -0.333664 0.685765 -2.001143 -0.820018 -2.442242 -1.583451 -2.793213 -2.158376 -2.431307 -0.855856 -0.471626 -1.478884
3 0.672266 -0.397422 0.105106 -1.822060 -1.335294 -1.384110 -0.608579 -1.639581 -2.081721 -1.171877 -1.102493 -1.264328 -1.165166
4 0.747622 0.110331 -0.079109 -1.108698 -0.391749 -0.448919 0.133859 -0.843237 -1.924086 -0.222835 0.221819 0.017631 -0.725177
5 2.072441 -1.565884 -0.268750 -1.648648 -3.149219 -3.406801 -4.332461 -1.709990 -1.313696 -1.503431 -2.036749 -1.928213 -2.657430
6 -0.163654 0.470736 0.440011 0.594090 -1.227236 0.409218 0.608496 -0.948833 -2.051031 0.892327 0.371683 -0.318984 0.022251
7 0.477732 -0.217651 -0.908178 -1.257961 -1.360625 -0.551388 -0.599896 0.099066 1.299780 2.443060 0.417236 -0.919898 -0.916391
8 0.897786 -3.040013 0.311694 -0.386220 -0.321124 -0.221380 -0.207002 -0.127210 0.011073 0.211925 -0.400748 -0.296623 -0.143419
9 -0.977087 1.088438 -0.184899 -0.626934 0.577247 0.522552 1.150101 1.023214 0.136257 0.193237 -0.496760 0.367549 0.378679
10 0.328615 -1.292300 -1.398337 -0.677268 -1.070980 -0.075073 -0.740061 -0.424240 -0.216693 0.633892 -0.070397 0.960392 0.403827
11 -0.199470 0.110219 0.238637 0.455154 -0.116209 0.374450 0.078145 0.424005 0.633052 -0.153498 -0.647002 0.301135 -0.000406
12 -0.711256 0.124802 0.734425 -0.445078 -0.503247 -0.323539 0.236246 -0.572803 -0.221112 -0.206486 -0.180516 0.119335 0.027470
13 -0.806898 -0.126740 -0.383726 0.035489 -1.164460 -0.574335 -0.633858 -0.009812 -0.131411 0.549197 -0.257952 0.307916 0.814674
14 -0.077242 -0.331495 0.550493 -0.008575 -0.215759 -1.260552 -0.581296 0.369790 -0.684267 0.792489 -0.457321 -0.704205 -0.093986
15 0.244538 0.777957 0.464181 0.169574 -0.433604 -1.172185 -1.866928 0.759778 -0.372608 0.009766 0.964104 1.082661 -0.506505
16 0.602329 -0.035069 0.178352 -0.036690 0.180302 -0.769568 0.364535 0.996915 0.263984 -0.829872 -0.133422 0.601135 -1.217336
17 -0.570258 -0.759570 0.108993 0.657477 0.342355 -0.903388 0.112467 -0.669060 -0.661619 0.915675 1.620722 -0.160697 0.379275
18 -0.288268 -1.202534 -0.544058 0.295908 0.568680 -1.416228 0.423676 0.041836 -0.665694 0.699155 -0.070704 -0.429451 1.194384
19 -0.497305 -0.552590 0.332470 0.660607 0.293725 -0.945647 -1.269354 0.464095 1.166255 2.034233 2.037855 0.555927 0.423683
20 1.386141 -0.516432 -0.074640 0.751101 -1.151864 0.155819 -1.921431 -3.381158 -1.145758 -1.197084 0.654749 1.636425 0.993236
21 0.076772 0.072900 0.122544 0.799017 -1.121011 -0.137599 -1.150187 -1.669293 -1.110882 -0.047217 -0.034112 -0.659214 1.160642
22 0.670757 -0.167252 -0.352765 0.189499 -1.232602 -0.168579 -1.559900 -1.850665 -1.416478 0.031846 0.308193 -0.956133 0.507231
23 0.639283 0.699164 0.621380 -0.725771 -0.890352 0.643955 -1.097228 0.229756 -0.091793 -2.390193 -0.825768 -2.164531 -0.772983
24 -0.907399 2.155157 0.873522 1.655111 0.871099 1.083262 -0.186962 0.373227 -0.354082 0.573586 0.733097 -0.986481 0.727511
25 -1.152272 1.601470 0.221927 1.296592 0.572807 0.581774 -0.479257 0.209504 -0.636178 0.574450 0.615706 -0.877894 0.941827
26 -0.676596 0.405600 0.553370 0.691531 -0.292469 0.626694 0.080413 0.246868 -0.100975 0.606694 -0.024154 -1.553730 -0.210884
27 -0.704834 -0.058170 0.609171 -0.735340 -0.512747 0.796018 -0.405976 0.502120 0.717380 -1.625431 0.825742 -1.663942 -0.379395
28 0.273095 1.014503 -0.772750 -0.065028 -0.513485 0.235377 -0.266144 1.373964 0.711880 -1.261758 1.106463 0.515863 0.555866
29 1.216372 0.637021 0.649194 0.099873 -0.816614 0.555439 -1.272918 -0.035862 0.154194 -1.797465 -0.177830 -1.702118 -1.136716
... ... ... ... ... ... ... ... ... ... ... ... ... ...
342 0.056741 0.169776 0.434163 -0.208821 0.498957 0.349198 0.547068 0.250228 -0.743894 -0.497075 0.373497 -0.025547 0.260645
343 -0.845812 -0.163165 0.268174 1.312135 1.241686 1.484484 1.279782 -0.350179 -0.266719 -0.170434 0.147000 -0.259175 -0.323251
344 0.056854 0.089458 -0.128149 0.123107 -0.879175 0.172486 0.919301 0.727007 -0.032509 -0.594358 -0.241536 -0.339538 -1.563800
345 1.322735 -0.970372 -1.058427 1.018282 -1.228871 0.835533 1.462831 -1.481872 -2.024441 0.388890 2.395768 -0.993539 0.301816
346 -0.159679 -0.200313 -0.181878 0.221536 -0.604018 0.554979 0.173592 -1.137738 -1.525377 -0.382164 1.156959 0.545188 -0.873936
347 -0.510690 -0.141874 -0.170690 -0.486309 -1.066447 -1.098392 -1.513393 -0.202811 0.062343 0.446348 -0.029988 -0.024432 -0.978036
348 0.441393 0.403987 0.538948 1.253198 -0.158511 0.497768 0.151471 -0.006025 0.213458 0.119760 -0.002312 0.139434 -0.401118
349 0.548477 0.987769 0.505748 0.779668 0.504327 -0.003400 0.200264 0.287803 0.084852 -0.044437 0.769553 0.169816 -0.581506
350 0.278851 -0.150632 1.015313 0.158731 -1.435466 -0.910636 1.526971 0.810376 -0.088268 2.273901 1.895682 -0.573207 1.173543
351 1.781784 -0.680962 -0.140043 1.730156 0.760657 1.081874 0.686370 -0.456141 -0.310319 0.443108 0.067726 -0.804283 0.268616
352 1.110023 -0.419764 -0.451242 1.471440 0.860531 0.858025 1.016472 0.013533 -0.532955 0.597255 -0.385255 -1.299309 0.869963
353 0.463780 0.094111 0.074193 0.457058 -0.494585 -0.741218 -1.615368 -0.323890 0.179301 -0.914854 -0.881275 -0.284568 0.516848
354 0.162857 1.300630 -0.374191 -0.148478 -0.275205 0.936621 -0.301931 0.926288 -0.242039 -1.217862 -0.849053 0.381655 1.521222
355 -0.261040 1.897992 0.324175 0.250461 -0.326921 0.078347 -0.794723 1.245895 0.561437 0.299601 0.612062 0.375109 0.668225
356 -2.412627 -0.912657 0.924859 1.091412 -0.430459 0.991776 0.577087 0.366311 0.916132 -0.010096 -0.337066 0.723121 0.634413
357 -1.610420 -0.171488 1.308910 1.557149 -0.783120 1.055891 0.070922 0.736289 0.651236 -0.209692 -0.293388 0.549580 0.947465
358 -1.627642 -0.225022 1.420291 1.585386 -0.623077 1.204209 0.203574 0.815228 0.701131 -0.111706 -0.352897 0.552444 1.038487
359 1.658650 0.261694 0.694273 -0.634006 -0.742717 -1.107684 -0.040641 0.685375 0.704374 0.457634 -0.012812 -0.227444 -0.311482
360 -0.472450 1.290735 1.251486 0.902820 1.064267 0.319911 0.273062 -0.004026 -0.730129 -0.487802 -0.590033 0.917054 0.316796
361 0.145973 1.078298 -0.110458 0.396705 0.465683 0.120005 0.324478 0.647014 0.406366 0.303529 0.342183 0.418467 -0.257006
362 1.354053 0.408020 -1.449365 -0.144038 0.735070 1.458916 -0.253049 0.476118 1.309448 1.981607 0.319930 -0.734588 -2.427842
363 3.546326 -0.337767 -0.983896 -3.155084 1.922015 3.128359 1.576092 2.767242 2.734920 1.749030 -1.432287 -5.486282 -3.776088
364 3.564797 -0.492960 -0.663172 -2.465245 2.044991 3.045697 1.746383 2.238430 2.806354 2.318786 -0.732814 -5.203217 -4.762769
365 -0.480041 0.390140 0.283493 0.710367 0.436247 0.787936 0.149057 1.081200 1.130496 0.783116 1.174331 0.987069 1.027523
366 1.601344 1.120977 0.942690 0.218542 1.432015 1.975393 1.352637 0.851851 0.707687 1.420656 -1.301018 0.996552 2.286308
367 -1.388425 0.554214 2.322455 0.125526 0.168411 1.459935 -0.011567 0.377516 1.801634 1.061665 1.432895 1.553952 0.335629
368 -0.560849 0.191976 -1.558597 -1.925355 0.006144 1.582531 2.334107 1.472221 0.788027 -0.066399 -0.539592 -0.089987 1.081026
369 -0.015724 0.095384 -0.050287 0.330265 -0.857518 -1.110181 0.457976 1.235890 0.937447 1.294180 1.196429 1.964913 0.788473
370 -0.320091 0.789370 -0.347116 -0.257819 -0.264866 0.119392 0.174557 -0.102622 -0.147261 0.927949 1.494185 0.617596 -0.119013
371 -1.397911 0.969347 -0.218602 0.165675 -1.187201 -1.022111 0.090487 -0.281353 -1.520146 -0.950575 -1.733689 -0.924046 -0.731033

372 rows × 13 columns

In [60]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep is one expression.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[60]:
[4836.0,
 4367.616182778379,
 3974.0733975762073,
 3720.4477504020774,
 3549.9823424876267,
 3386.093163495573,
 3254.3815559758523,
 3136.90996141146,
 3048.6934734702136,
 2957.186911982338,
 2855.333306370868,
 2802.0220333671496,
 2712.5965714921504,
 2657.189981994876]
In [89]:
# Elbow plot: within-cluster sum of squares vs. number of clusters.
# Labels/title added so the figure stands alone; plt.show() suppresses the
# stray [<Line2D>] repr that the bare plot call leaves in the output.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow method')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Within-cluster sum of squares')
plt.show()
Out[89]:
[<matplotlib.lines.Line2D at 0x244598a8630>]

K=3

In [81]:
# Final clustering with k = 3 (chosen from the elbow plot above); fixed seed
# for reproducibility, n_init=10 restarts keep the lowest-inertia solution.
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[81]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [82]:
kmeans_mfcc.labels_
Out[82]:
array([1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,
       1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,
       1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0,
       1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
       0, 1, 1, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 0])
In [83]:
# Cluster id for every row of X. Since X is the same data the model was just
# fitted on, this is identical to kmeans_mfcc.labels_ (compare Out[82]/Out[83]).
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[83]:
array([1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,
       1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,
       1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0,
       1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
       0, 1, 1, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 0])
In [84]:
# Attach the cluster assignment and the true target to the feature frame so
# they can be cross-tabulated below.
# NOTE(review): this mutates X in place — the matrix used for clustering now
# carries two non-feature columns, and any frame sharing this object upstream
# sees them too; consider `X.copy()` or a new name before re-fitting on X.
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [85]:
X
Out[85]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.303210 1.253016 -0.394054 0.592598 0.106623 -2.083256 0.858313 0.801936 2.380580 -1.304258 0.556361 1.949530 -1.046692 1 0
1 0.647559 -1.553511 -1.648243 -1.010792 -0.857927 0.335856 0.884468 0.250703 0.298648 0.683922 1.599907 0.349480 -0.484103 1 0
2 2.266625 -0.333664 0.685765 -2.001143 -0.820018 -2.442242 -1.583451 -2.793213 -2.158376 -2.431307 -0.855856 -0.471626 -1.478884 0 0
3 0.672266 -0.397422 0.105106 -1.822060 -1.335294 -1.384110 -0.608579 -1.639581 -2.081721 -1.171877 -1.102493 -1.264328 -1.165166 0 0
4 0.747622 0.110331 -0.079109 -1.108698 -0.391749 -0.448919 0.133859 -0.843237 -1.924086 -0.222835 0.221819 0.017631 -0.725177 0 0
5 2.072441 -1.565884 -0.268750 -1.648648 -3.149219 -3.406801 -4.332461 -1.709990 -1.313696 -1.503431 -2.036749 -1.928213 -2.657430 0 0
6 -0.163654 0.470736 0.440011 0.594090 -1.227236 0.409218 0.608496 -0.948833 -2.051031 0.892327 0.371683 -0.318984 0.022251 0 0
7 0.477732 -0.217651 -0.908178 -1.257961 -1.360625 -0.551388 -0.599896 0.099066 1.299780 2.443060 0.417236 -0.919898 -0.916391 1 0
8 0.897786 -3.040013 0.311694 -0.386220 -0.321124 -0.221380 -0.207002 -0.127210 0.011073 0.211925 -0.400748 -0.296623 -0.143419 0 0
9 -0.977087 1.088438 -0.184899 -0.626934 0.577247 0.522552 1.150101 1.023214 0.136257 0.193237 -0.496760 0.367549 0.378679 1 0
10 0.328615 -1.292300 -1.398337 -0.677268 -1.070980 -0.075073 -0.740061 -0.424240 -0.216693 0.633892 -0.070397 0.960392 0.403827 0 0
11 -0.199470 0.110219 0.238637 0.455154 -0.116209 0.374450 0.078145 0.424005 0.633052 -0.153498 -0.647002 0.301135 -0.000406 1 0
12 -0.711256 0.124802 0.734425 -0.445078 -0.503247 -0.323539 0.236246 -0.572803 -0.221112 -0.206486 -0.180516 0.119335 0.027470 0 0
13 -0.806898 -0.126740 -0.383726 0.035489 -1.164460 -0.574335 -0.633858 -0.009812 -0.131411 0.549197 -0.257952 0.307916 0.814674 0 0
14 -0.077242 -0.331495 0.550493 -0.008575 -0.215759 -1.260552 -0.581296 0.369790 -0.684267 0.792489 -0.457321 -0.704205 -0.093986 0 0
15 0.244538 0.777957 0.464181 0.169574 -0.433604 -1.172185 -1.866928 0.759778 -0.372608 0.009766 0.964104 1.082661 -0.506505 0 0
16 0.602329 -0.035069 0.178352 -0.036690 0.180302 -0.769568 0.364535 0.996915 0.263984 -0.829872 -0.133422 0.601135 -1.217336 1 0
17 -0.570258 -0.759570 0.108993 0.657477 0.342355 -0.903388 0.112467 -0.669060 -0.661619 0.915675 1.620722 -0.160697 0.379275 0 0
18 -0.288268 -1.202534 -0.544058 0.295908 0.568680 -1.416228 0.423676 0.041836 -0.665694 0.699155 -0.070704 -0.429451 1.194384 0 0
19 -0.497305 -0.552590 0.332470 0.660607 0.293725 -0.945647 -1.269354 0.464095 1.166255 2.034233 2.037855 0.555927 0.423683 1 0
20 1.386141 -0.516432 -0.074640 0.751101 -1.151864 0.155819 -1.921431 -3.381158 -1.145758 -1.197084 0.654749 1.636425 0.993236 0 0
21 0.076772 0.072900 0.122544 0.799017 -1.121011 -0.137599 -1.150187 -1.669293 -1.110882 -0.047217 -0.034112 -0.659214 1.160642 0 0
22 0.670757 -0.167252 -0.352765 0.189499 -1.232602 -0.168579 -1.559900 -1.850665 -1.416478 0.031846 0.308193 -0.956133 0.507231 0 0
23 0.639283 0.699164 0.621380 -0.725771 -0.890352 0.643955 -1.097228 0.229756 -0.091793 -2.390193 -0.825768 -2.164531 -0.772983 1 0
24 -0.907399 2.155157 0.873522 1.655111 0.871099 1.083262 -0.186962 0.373227 -0.354082 0.573586 0.733097 -0.986481 0.727511 1 0
25 -1.152272 1.601470 0.221927 1.296592 0.572807 0.581774 -0.479257 0.209504 -0.636178 0.574450 0.615706 -0.877894 0.941827 1 0
26 -0.676596 0.405600 0.553370 0.691531 -0.292469 0.626694 0.080413 0.246868 -0.100975 0.606694 -0.024154 -1.553730 -0.210884 1 0
27 -0.704834 -0.058170 0.609171 -0.735340 -0.512747 0.796018 -0.405976 0.502120 0.717380 -1.625431 0.825742 -1.663942 -0.379395 1 0
28 0.273095 1.014503 -0.772750 -0.065028 -0.513485 0.235377 -0.266144 1.373964 0.711880 -1.261758 1.106463 0.515863 0.555866 1 0
29 1.216372 0.637021 0.649194 0.099873 -0.816614 0.555439 -1.272918 -0.035862 0.154194 -1.797465 -0.177830 -1.702118 -1.136716 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
342 0.056741 0.169776 0.434163 -0.208821 0.498957 0.349198 0.547068 0.250228 -0.743894 -0.497075 0.373497 -0.025547 0.260645 1 1
343 -0.845812 -0.163165 0.268174 1.312135 1.241686 1.484484 1.279782 -0.350179 -0.266719 -0.170434 0.147000 -0.259175 -0.323251 1 1
344 0.056854 0.089458 -0.128149 0.123107 -0.879175 0.172486 0.919301 0.727007 -0.032509 -0.594358 -0.241536 -0.339538 -1.563800 1 1
345 1.322735 -0.970372 -1.058427 1.018282 -1.228871 0.835533 1.462831 -1.481872 -2.024441 0.388890 2.395768 -0.993539 0.301816 0 1
346 -0.159679 -0.200313 -0.181878 0.221536 -0.604018 0.554979 0.173592 -1.137738 -1.525377 -0.382164 1.156959 0.545188 -0.873936 0 1
347 -0.510690 -0.141874 -0.170690 -0.486309 -1.066447 -1.098392 -1.513393 -0.202811 0.062343 0.446348 -0.029988 -0.024432 -0.978036 0 1
348 0.441393 0.403987 0.538948 1.253198 -0.158511 0.497768 0.151471 -0.006025 0.213458 0.119760 -0.002312 0.139434 -0.401118 1 1
349 0.548477 0.987769 0.505748 0.779668 0.504327 -0.003400 0.200264 0.287803 0.084852 -0.044437 0.769553 0.169816 -0.581506 1 1
350 0.278851 -0.150632 1.015313 0.158731 -1.435466 -0.910636 1.526971 0.810376 -0.088268 2.273901 1.895682 -0.573207 1.173543 1 1
351 1.781784 -0.680962 -0.140043 1.730156 0.760657 1.081874 0.686370 -0.456141 -0.310319 0.443108 0.067726 -0.804283 0.268616 1 1
352 1.110023 -0.419764 -0.451242 1.471440 0.860531 0.858025 1.016472 0.013533 -0.532955 0.597255 -0.385255 -1.299309 0.869963 1 1
353 0.463780 0.094111 0.074193 0.457058 -0.494585 -0.741218 -1.615368 -0.323890 0.179301 -0.914854 -0.881275 -0.284568 0.516848 0 1
354 0.162857 1.300630 -0.374191 -0.148478 -0.275205 0.936621 -0.301931 0.926288 -0.242039 -1.217862 -0.849053 0.381655 1.521222 1 1
355 -0.261040 1.897992 0.324175 0.250461 -0.326921 0.078347 -0.794723 1.245895 0.561437 0.299601 0.612062 0.375109 0.668225 1 1
356 -2.412627 -0.912657 0.924859 1.091412 -0.430459 0.991776 0.577087 0.366311 0.916132 -0.010096 -0.337066 0.723121 0.634413 1 1
357 -1.610420 -0.171488 1.308910 1.557149 -0.783120 1.055891 0.070922 0.736289 0.651236 -0.209692 -0.293388 0.549580 0.947465 1 1
358 -1.627642 -0.225022 1.420291 1.585386 -0.623077 1.204209 0.203574 0.815228 0.701131 -0.111706 -0.352897 0.552444 1.038487 1 1
359 1.658650 0.261694 0.694273 -0.634006 -0.742717 -1.107684 -0.040641 0.685375 0.704374 0.457634 -0.012812 -0.227444 -0.311482 1 1
360 -0.472450 1.290735 1.251486 0.902820 1.064267 0.319911 0.273062 -0.004026 -0.730129 -0.487802 -0.590033 0.917054 0.316796 1 1
361 0.145973 1.078298 -0.110458 0.396705 0.465683 0.120005 0.324478 0.647014 0.406366 0.303529 0.342183 0.418467 -0.257006 1 1
362 1.354053 0.408020 -1.449365 -0.144038 0.735070 1.458916 -0.253049 0.476118 1.309448 1.981607 0.319930 -0.734588 -2.427842 1 1
363 3.546326 -0.337767 -0.983896 -3.155084 1.922015 3.128359 1.576092 2.767242 2.734920 1.749030 -1.432287 -5.486282 -3.776088 2 1
364 3.564797 -0.492960 -0.663172 -2.465245 2.044991 3.045697 1.746383 2.238430 2.806354 2.318786 -0.732814 -5.203217 -4.762769 2 1
365 -0.480041 0.390140 0.283493 0.710367 0.436247 0.787936 0.149057 1.081200 1.130496 0.783116 1.174331 0.987069 1.027523 1 1
366 1.601344 1.120977 0.942690 0.218542 1.432015 1.975393 1.352637 0.851851 0.707687 1.420656 -1.301018 0.996552 2.286308 1 1
367 -1.388425 0.554214 2.322455 0.125526 0.168411 1.459935 -0.011567 0.377516 1.801634 1.061665 1.432895 1.553952 0.335629 1 1
368 -0.560849 0.191976 -1.558597 -1.925355 0.006144 1.582531 2.334107 1.472221 0.788027 -0.066399 -0.539592 -0.089987 1.081026 1 1
369 -0.015724 0.095384 -0.050287 0.330265 -0.857518 -1.110181 0.457976 1.235890 0.937447 1.294180 1.196429 1.964913 0.788473 1 1
370 -0.320091 0.789370 -0.347116 -0.257819 -0.264866 0.119392 0.174557 -0.102622 -0.147261 0.927949 1.494185 0.617596 -0.119013 1 1
371 -1.397911 0.969347 -0.218602 0.165675 -1.187201 -1.022111 0.090487 -0.281353 -1.520146 -0.950575 -1.733689 -0.924046 -0.731033 0 1

372 rows × 15 columns

In [86]:
# Count songs per (chosen, cluster) pair, pivot clusters to rows, and draw a
# stacked bar per cluster split by the 'chosen' label.
pair_counts = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = pair_counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[86]:
<matplotlib.axes._subplots.AxesSubplot at 0x2445d307358>
In [26]:
from IPython.display import display, Markdown, Latex

# Render the first company's name as a level-2 markdown heading.
heading = Markdown('## ' + companies[0])
display(heading)

Arte Francés

ANN

In [16]:
X = df_n_ps_std_mfcc[1]
In [17]:
y = df_n_ps[1]['chosen']
In [18]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [19]:
X_train.shape
Out[19]:
(279, 13)
In [24]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [25]:
# Hyper-parameter candidates for the grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid (see commented entry below)
In [26]:
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (the epoch reference)

np.random.seed(1234)  # fix the RNG so the search is reproducible
# Parameter grid; batch_size was deliberately left out (kept commented).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and plain accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24 —
# pin the sklearn version or drop the argument when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [27]:
# Exhaustive 5-fold search over the grid (3 activations x 11 iters x 11
# architectures x 11 learning rates). The report strings are in Spanish:
# best parameters, accuracy/kappa of the best model, then total wall time.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time right after the model training finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.006, 'max_iter': 300}, que permiten obtener un Accuracy de 82.08% y un Kappa del 43.49
Tiempo total: 29.12 minutos
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (300) reached and the optimization hasn't converged yet.
  % self.max_iter, ConvergenceWarning)
In [35]:
n0 = X_train.shape[1]  # input width = number of MFCC features
### hidden_layer_sizes
# Layer widths for the Keras re-implementation: the hidden widths found by the
# grid search, plus a single sigmoid output unit appended at the end.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

# Read the remaining best hyper-parameters from the fitted grid instead of
# hard-coding 0.006 / 300, so this cell stays correct if the search is re-run.
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [36]:
input_tensor = Input(shape = (n0,))
In [37]:
# Stack the tanh hidden layers, each consuming the previous layer's output;
# the last entry of ns (always 1) is the sigmoid classification unit.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation = 'tanh')(hidden_outputs[-1]))

classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [38]:
# Assemble the model and snapshot its freshly initialized weights so training
# can later be restarted from exactly the same starting point (see set_weights).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [39]:
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_3 (Dense)              (None, 20)                280       
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 21        
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
_________________________________________________________________
In [40]:
# Restore the saved initial weights so every run of this cell trains from the
# same starting point, then compile with the grid-selected learning rate.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# ReduceLROnPlateau halves the learning rate whenever validation accuracy fails
# to improve by at least 0.01 for 10 consecutive epochs.
# NOTE(review): with 300 epochs and no EarlyStopping the LR decays to ~1e-11
# and the last ~250 epochs change nothing (see the log above) — consider
# adding keras.callbacks.EarlyStopping or a min_lr floor.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 279 samples, validate on 93 samples
Epoch 1/300
279/279 [==============================] - 1s 3ms/step - loss: 0.7063 - accuracy: 0.5197 - val_loss: 0.6660 - val_accuracy: 0.5914
Epoch 2/300
279/279 [==============================] - 0s 61us/step - loss: 0.6003 - accuracy: 0.6918 - val_loss: 0.5741 - val_accuracy: 0.7419
Epoch 3/300
279/279 [==============================] - 0s 68us/step - loss: 0.5317 - accuracy: 0.7348 - val_loss: 0.5278 - val_accuracy: 0.7527
Epoch 4/300
279/279 [==============================] - 0s 72us/step - loss: 0.4996 - accuracy: 0.7634 - val_loss: 0.5053 - val_accuracy: 0.7742
Epoch 5/300
279/279 [==============================] - 0s 75us/step - loss: 0.4770 - accuracy: 0.7778 - val_loss: 0.4938 - val_accuracy: 0.8065
Epoch 6/300
279/279 [==============================] - 0s 90us/step - loss: 0.4639 - accuracy: 0.7778 - val_loss: 0.4845 - val_accuracy: 0.7957
Epoch 7/300
279/279 [==============================] - 0s 82us/step - loss: 0.4526 - accuracy: 0.7849 - val_loss: 0.4813 - val_accuracy: 0.8172
Epoch 8/300
279/279 [==============================] - 0s 75us/step - loss: 0.4466 - accuracy: 0.7921 - val_loss: 0.4822 - val_accuracy: 0.8065
Epoch 9/300
279/279 [==============================] - 0s 79us/step - loss: 0.4412 - accuracy: 0.7993 - val_loss: 0.4819 - val_accuracy: 0.8172
Epoch 10/300
279/279 [==============================] - 0s 82us/step - loss: 0.4373 - accuracy: 0.8065 - val_loss: 0.4840 - val_accuracy: 0.7957
Epoch 11/300
279/279 [==============================] - 0s 86us/step - loss: 0.4343 - accuracy: 0.8136 - val_loss: 0.4827 - val_accuracy: 0.7957
Epoch 12/300
279/279 [==============================] - 0s 75us/step - loss: 0.4328 - accuracy: 0.8100 - val_loss: 0.4866 - val_accuracy: 0.7849
Epoch 13/300
279/279 [==============================] - 0s 82us/step - loss: 0.4225 - accuracy: 0.8136 - val_loss: 0.4860 - val_accuracy: 0.7957
Epoch 14/300
279/279 [==============================] - 0s 75us/step - loss: 0.4186 - accuracy: 0.8136 - val_loss: 0.4865 - val_accuracy: 0.7849
Epoch 15/300
279/279 [==============================] - 0s 82us/step - loss: 0.4118 - accuracy: 0.8136 - val_loss: 0.4846 - val_accuracy: 0.7849
Epoch 16/300
279/279 [==============================] - 0s 79us/step - loss: 0.4080 - accuracy: 0.8208 - val_loss: 0.4901 - val_accuracy: 0.7849
Epoch 17/300
279/279 [==============================] - 0s 107us/step - loss: 0.4009 - accuracy: 0.8351 - val_loss: 0.4878 - val_accuracy: 0.7742

Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.003000000026077032.
Epoch 18/300
279/279 [==============================] - 0s 107us/step - loss: 0.3950 - accuracy: 0.8387 - val_loss: 0.4864 - val_accuracy: 0.7742
Epoch 19/300
279/279 [==============================] - 0s 68us/step - loss: 0.3922 - accuracy: 0.8387 - val_loss: 0.4852 - val_accuracy: 0.7742
Epoch 20/300
279/279 [==============================] - 0s 86us/step - loss: 0.3886 - accuracy: 0.8530 - val_loss: 0.4807 - val_accuracy: 0.7849
Epoch 21/300
279/279 [==============================] - 0s 64us/step - loss: 0.3865 - accuracy: 0.8566 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 22/300
279/279 [==============================] - 0s 82us/step - loss: 0.3828 - accuracy: 0.8530 - val_loss: 0.4848 - val_accuracy: 0.7849
Epoch 23/300
279/279 [==============================] - 0s 79us/step - loss: 0.3800 - accuracy: 0.8566 - val_loss: 0.4855 - val_accuracy: 0.7849
Epoch 24/300
279/279 [==============================] - 0s 68us/step - loss: 0.3760 - accuracy: 0.8638 - val_loss: 0.4818 - val_accuracy: 0.7849
Epoch 25/300
279/279 [==============================] - 0s 79us/step - loss: 0.3744 - accuracy: 0.8602 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 26/300
279/279 [==============================] - 0s 86us/step - loss: 0.3705 - accuracy: 0.8710 - val_loss: 0.4781 - val_accuracy: 0.7849
Epoch 27/300
279/279 [==============================] - 0s 86us/step - loss: 0.3666 - accuracy: 0.8746 - val_loss: 0.4785 - val_accuracy: 0.7849

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 28/300
279/279 [==============================] - 0s 79us/step - loss: 0.3637 - accuracy: 0.8746 - val_loss: 0.4799 - val_accuracy: 0.7849
Epoch 29/300
279/279 [==============================] - 0s 75us/step - loss: 0.3620 - accuracy: 0.8746 - val_loss: 0.4816 - val_accuracy: 0.7849
Epoch 30/300
279/279 [==============================] - 0s 79us/step - loss: 0.3604 - accuracy: 0.8746 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 31/300
279/279 [==============================] - 0s 75us/step - loss: 0.3596 - accuracy: 0.8746 - val_loss: 0.4831 - val_accuracy: 0.7849
Epoch 32/300
279/279 [==============================] - 0s 79us/step - loss: 0.3572 - accuracy: 0.8781 - val_loss: 0.4832 - val_accuracy: 0.7849
Epoch 33/300
279/279 [==============================] - 0s 79us/step - loss: 0.3555 - accuracy: 0.8781 - val_loss: 0.4817 - val_accuracy: 0.7849
Epoch 34/300
279/279 [==============================] - 0s 79us/step - loss: 0.3540 - accuracy: 0.8817 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 35/300
279/279 [==============================] - 0s 82us/step - loss: 0.3527 - accuracy: 0.8817 - val_loss: 0.4817 - val_accuracy: 0.7849
Epoch 36/300
279/279 [==============================] - 0s 75us/step - loss: 0.3510 - accuracy: 0.8817 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 37/300
279/279 [==============================] - 0s 104us/step - loss: 0.3493 - accuracy: 0.8781 - val_loss: 0.4836 - val_accuracy: 0.7849

Epoch 00037: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 38/300
279/279 [==============================] - 0s 86us/step - loss: 0.3476 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 39/300
279/279 [==============================] - 0s 82us/step - loss: 0.3469 - accuracy: 0.8817 - val_loss: 0.4823 - val_accuracy: 0.7849
Epoch 40/300
279/279 [==============================] - 0s 75us/step - loss: 0.3457 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 41/300
279/279 [==============================] - 0s 82us/step - loss: 0.3449 - accuracy: 0.8817 - val_loss: 0.4826 - val_accuracy: 0.7849
Epoch 42/300
279/279 [==============================] - 0s 90us/step - loss: 0.3443 - accuracy: 0.8817 - val_loss: 0.4830 - val_accuracy: 0.7849
Epoch 43/300
279/279 [==============================] - 0s 104us/step - loss: 0.3434 - accuracy: 0.8817 - val_loss: 0.4833 - val_accuracy: 0.7849
Epoch 44/300
279/279 [==============================] - 0s 93us/step - loss: 0.3427 - accuracy: 0.8817 - val_loss: 0.4835 - val_accuracy: 0.7849
Epoch 45/300
279/279 [==============================] - 0s 90us/step - loss: 0.3418 - accuracy: 0.8817 - val_loss: 0.4831 - val_accuracy: 0.7849
Epoch 46/300
279/279 [==============================] - 0s 93us/step - loss: 0.3411 - accuracy: 0.8817 - val_loss: 0.4832 - val_accuracy: 0.7849
Epoch 47/300
279/279 [==============================] - 0s 97us/step - loss: 0.3403 - accuracy: 0.8817 - val_loss: 0.4832 - val_accuracy: 0.7849

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 48/300
279/279 [==============================] - 0s 111us/step - loss: 0.3394 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 49/300
279/279 [==============================] - 0s 100us/step - loss: 0.3389 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 50/300
279/279 [==============================] - 0s 111us/step - loss: 0.3386 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 51/300
279/279 [==============================] - 0s 115us/step - loss: 0.3381 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 52/300
279/279 [==============================] - 0s 136us/step - loss: 0.3378 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 53/300
279/279 [==============================] - 0s 118us/step - loss: 0.3373 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 54/300
279/279 [==============================] - 0s 86us/step - loss: 0.3370 - accuracy: 0.8817 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 55/300
279/279 [==============================] - 0s 115us/step - loss: 0.3365 - accuracy: 0.8817 - val_loss: 0.4825 - val_accuracy: 0.7849
Epoch 56/300
279/279 [==============================] - 0s 107us/step - loss: 0.3362 - accuracy: 0.8817 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 57/300
279/279 [==============================] - 0s 122us/step - loss: 0.3358 - accuracy: 0.8853 - val_loss: 0.4830 - val_accuracy: 0.7849

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 58/300
279/279 [==============================] - 0s 104us/step - loss: 0.3353 - accuracy: 0.8853 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 59/300
279/279 [==============================] - 0s 107us/step - loss: 0.3351 - accuracy: 0.8853 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 60/300
279/279 [==============================] - 0s 104us/step - loss: 0.3349 - accuracy: 0.8853 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 61/300
279/279 [==============================] - 0s 104us/step - loss: 0.3347 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 62/300
279/279 [==============================] - 0s 100us/step - loss: 0.3345 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 63/300
279/279 [==============================] - 0s 79us/step - loss: 0.3343 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 64/300
279/279 [==============================] - 0s 90us/step - loss: 0.3341 - accuracy: 0.8889 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 65/300
279/279 [==============================] - 0s 93us/step - loss: 0.3339 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 66/300
279/279 [==============================] - 0s 104us/step - loss: 0.3337 - accuracy: 0.8889 - val_loss: 0.4829 - val_accuracy: 0.7849
Epoch 67/300
279/279 [==============================] - 0s 79us/step - loss: 0.3335 - accuracy: 0.8889 - val_loss: 0.4826 - val_accuracy: 0.7849

Epoch 00067: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 68/300
279/279 [==============================] - 0s 100us/step - loss: 0.3333 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 69/300
279/279 [==============================] - 0s 104us/step - loss: 0.3332 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 70/300
279/279 [==============================] - 0s 90us/step - loss: 0.3331 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 71/300
279/279 [==============================] - 0s 100us/step - loss: 0.3330 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 72/300
279/279 [==============================] - 0s 107us/step - loss: 0.3329 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 73/300
279/279 [==============================] - 0s 104us/step - loss: 0.3328 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 74/300
279/279 [==============================] - 0s 118us/step - loss: 0.3327 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 75/300
279/279 [==============================] - 0s 100us/step - loss: 0.3326 - accuracy: 0.8889 - val_loss: 0.4827 - val_accuracy: 0.7849
Epoch 76/300
279/279 [==============================] - 0s 90us/step - loss: 0.3325 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 77/300
279/279 [==============================] - 0s 72us/step - loss: 0.3324 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00077: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 78/300
279/279 [==============================] - 0s 111us/step - loss: 0.3323 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 79/300
279/279 [==============================] - 0s 97us/step - loss: 0.3322 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 80/300
279/279 [==============================] - 0s 97us/step - loss: 0.3322 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 81/300
279/279 [==============================] - 0s 97us/step - loss: 0.3321 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 82/300
279/279 [==============================] - 0s 104us/step - loss: 0.3321 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 83/300
279/279 [==============================] - 0s 86us/step - loss: 0.3320 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 84/300
279/279 [==============================] - 0s 86us/step - loss: 0.3320 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 85/300
279/279 [==============================] - 0s 111us/step - loss: 0.3319 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 86/300
279/279 [==============================] - 0s 93us/step - loss: 0.3319 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 87/300
279/279 [==============================] - 0s 90us/step - loss: 0.3318 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00087: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 88/300
279/279 [==============================] - 0s 90us/step - loss: 0.3318 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 89/300
279/279 [==============================] - 0s 104us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 90/300
279/279 [==============================] - 0s 100us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 91/300
279/279 [==============================] - 0s 93us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 92/300
279/279 [==============================] - 0s 93us/step - loss: 0.3317 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 93/300
279/279 [==============================] - 0s 93us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 94/300
279/279 [==============================] - 0s 100us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 95/300
279/279 [==============================] - 0s 90us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 96/300
279/279 [==============================] - 0s 97us/step - loss: 0.3316 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 97/300
279/279 [==============================] - 0s 100us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00097: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 98/300
279/279 [==============================] - 0s 97us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 99/300
279/279 [==============================] - ETA: 0s - loss: 0.3474 - accuracy: 0.81 - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 100/300
279/279 [==============================] - ETA: 0s - loss: 0.2551 - accuracy: 0.93 - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 101/300
279/279 [==============================] - 0s 107us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 102/300
279/279 [==============================] - 0s 93us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 103/300
279/279 [==============================] - 0s 86us/step - loss: 0.3315 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 104/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 105/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 106/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 107/300
279/279 [==============================] - 0s 97us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00107: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 108/300
279/279 [==============================] - 0s 100us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 109/300
279/279 [==============================] - ETA: 0s - loss: 0.2823 - accuracy: 0.90 - 0s 100us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 110/300
279/279 [==============================] - 0s 90us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 111/300
279/279 [==============================] - 0s 72us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 112/300
279/279 [==============================] - 0s 107us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 113/300
279/279 [==============================] - 0s 86us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 114/300
279/279 [==============================] - 0s 72us/step - loss: 0.3314 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 115/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 116/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 117/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00117: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 118/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 119/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 120/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 121/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 122/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 123/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 124/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 125/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 126/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 127/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00127: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 128/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 129/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 130/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 131/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 132/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 133/300
279/279 [==============================] - 0s 125us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 134/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 135/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 136/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 137/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00137: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 138/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 139/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 140/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 141/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 142/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 143/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 144/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 145/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 146/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 147/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00147: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 148/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 149/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 150/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 151/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 152/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 153/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 154/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 155/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 156/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 157/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00157: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 158/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 159/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 160/300
279/279 [==============================] - 0s 133us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 161/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 162/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 163/300
279/279 [==============================] - 0s 68us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 164/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 165/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 166/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 167/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00167: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 168/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 169/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 170/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 171/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 172/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 173/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 174/300
279/279 [==============================] - 0s 168us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 175/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 176/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 177/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00177: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 178/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 179/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 180/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 181/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 182/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 183/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 184/300
279/279 [==============================] - ETA: 0s - loss: 0.2749 - accuracy: 0.96 - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 185/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 186/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 187/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00187: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 188/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 189/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 190/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 191/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 192/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 193/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 194/300
279/279 [==============================] - 0s 72us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 195/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 196/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 197/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00197: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 198/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 199/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 200/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 201/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 202/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 203/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 204/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 205/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 206/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 207/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00207: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 208/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 209/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 210/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 211/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 212/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 213/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 214/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 215/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 216/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 217/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00217: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 218/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 219/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 220/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 221/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 222/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 223/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 224/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 225/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 226/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 227/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00227: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 228/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 229/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 230/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 231/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 232/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 233/300
279/279 [==============================] - 0s 122us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 234/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 235/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 236/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 237/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00237: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 238/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 239/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 240/300
279/279 [==============================] - 0s 68us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 241/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 242/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 243/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 244/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 245/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 246/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 247/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00247: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 248/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 249/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 250/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 251/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 252/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 253/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 254/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 255/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 256/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 257/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00257: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 258/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 259/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 260/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 261/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 262/300
279/279 [==============================] - 0s 79us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 263/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 264/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 265/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 266/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 267/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00267: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 268/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 269/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 270/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 271/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 272/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 273/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 274/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 275/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 276/300
279/279 [==============================] - 0s 90us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 277/300
279/279 [==============================] - 0s 75us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00277: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 278/300
279/279 [==============================] - 0s 82us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 279/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 280/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 281/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 282/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 283/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 284/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 285/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 286/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 287/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00287: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 288/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 289/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 290/300
279/279 [==============================] - 0s 97us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 291/300
279/279 [==============================] - 0s 111us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 292/300
279/279 [==============================] - 0s 115us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 293/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 294/300
279/279 [==============================] - 0s 118us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 295/300
279/279 [==============================] - 0s 86us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 296/300
279/279 [==============================] - 0s 104us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 297/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849

Epoch 00297: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 298/300
279/279 [==============================] - 0s 107us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 299/300
279/279 [==============================] - 0s 93us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
Epoch 300/300
279/279 [==============================] - 0s 100us/step - loss: 0.3313 - accuracy: 0.8889 - val_loss: 0.4828 - val_accuracy: 0.7849
In [43]:
# Plot the Keras training history: accuracy and loss curves for the
# training and validation sets, one figure each.
# Fixes: removed leftover debug `print(epochs)`; added axis labels so the
# figures stand alone when skimmed.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 300)
In [44]:
# Evaluate the trained Keras model on the held-out test set.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
93/93 [==============================] - 0s 54us/step
test loss: 0.48277782432494626, test accuracy: 0.7849462628364563
In [64]:
# Predicted probabilities on the test set; ROC AUC is threshold-independent.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7282608695652174
In [65]:
# Binarize the predicted probabilities at 0.5, then score agreement with
# Cohen's kappa (chance-corrected, unlike raw accuracy).
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.366754617414248

KMeans

In [59]:
# Display the MFCC feature matrix (values appear standardized around 0).
X
Out[59]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.303210 1.253016 -0.394054 0.592598 0.106623 -2.083256 0.858313 0.801936 2.380580 -1.304258 0.556361 1.949530 -1.046692
1 0.647559 -1.553511 -1.648243 -1.010792 -0.857927 0.335856 0.884468 0.250703 0.298648 0.683922 1.599907 0.349480 -0.484103
2 2.266625 -0.333664 0.685765 -2.001143 -0.820018 -2.442242 -1.583451 -2.793213 -2.158376 -2.431307 -0.855856 -0.471626 -1.478884
3 0.672266 -0.397422 0.105106 -1.822060 -1.335294 -1.384110 -0.608579 -1.639581 -2.081721 -1.171877 -1.102493 -1.264328 -1.165166
4 0.747622 0.110331 -0.079109 -1.108698 -0.391749 -0.448919 0.133859 -0.843237 -1.924086 -0.222835 0.221819 0.017631 -0.725177
5 2.072441 -1.565884 -0.268750 -1.648648 -3.149219 -3.406801 -4.332461 -1.709990 -1.313696 -1.503431 -2.036749 -1.928213 -2.657430
6 -0.163654 0.470736 0.440011 0.594090 -1.227236 0.409218 0.608496 -0.948833 -2.051031 0.892327 0.371683 -0.318984 0.022251
7 0.477732 -0.217651 -0.908178 -1.257961 -1.360625 -0.551388 -0.599896 0.099066 1.299780 2.443060 0.417236 -0.919898 -0.916391
8 0.897786 -3.040013 0.311694 -0.386220 -0.321124 -0.221380 -0.207002 -0.127210 0.011073 0.211925 -0.400748 -0.296623 -0.143419
9 -0.977087 1.088438 -0.184899 -0.626934 0.577247 0.522552 1.150101 1.023214 0.136257 0.193237 -0.496760 0.367549 0.378679
10 0.328615 -1.292300 -1.398337 -0.677268 -1.070980 -0.075073 -0.740061 -0.424240 -0.216693 0.633892 -0.070397 0.960392 0.403827
11 -0.199470 0.110219 0.238637 0.455154 -0.116209 0.374450 0.078145 0.424005 0.633052 -0.153498 -0.647002 0.301135 -0.000406
12 -0.711256 0.124802 0.734425 -0.445078 -0.503247 -0.323539 0.236246 -0.572803 -0.221112 -0.206486 -0.180516 0.119335 0.027470
13 -0.806898 -0.126740 -0.383726 0.035489 -1.164460 -0.574335 -0.633858 -0.009812 -0.131411 0.549197 -0.257952 0.307916 0.814674
14 -0.077242 -0.331495 0.550493 -0.008575 -0.215759 -1.260552 -0.581296 0.369790 -0.684267 0.792489 -0.457321 -0.704205 -0.093986
15 0.244538 0.777957 0.464181 0.169574 -0.433604 -1.172185 -1.866928 0.759778 -0.372608 0.009766 0.964104 1.082661 -0.506505
16 0.602329 -0.035069 0.178352 -0.036690 0.180302 -0.769568 0.364535 0.996915 0.263984 -0.829872 -0.133422 0.601135 -1.217336
17 -0.570258 -0.759570 0.108993 0.657477 0.342355 -0.903388 0.112467 -0.669060 -0.661619 0.915675 1.620722 -0.160697 0.379275
18 -0.288268 -1.202534 -0.544058 0.295908 0.568680 -1.416228 0.423676 0.041836 -0.665694 0.699155 -0.070704 -0.429451 1.194384
19 -0.497305 -0.552590 0.332470 0.660607 0.293725 -0.945647 -1.269354 0.464095 1.166255 2.034233 2.037855 0.555927 0.423683
20 1.386141 -0.516432 -0.074640 0.751101 -1.151864 0.155819 -1.921431 -3.381158 -1.145758 -1.197084 0.654749 1.636425 0.993236
21 0.076772 0.072900 0.122544 0.799017 -1.121011 -0.137599 -1.150187 -1.669293 -1.110882 -0.047217 -0.034112 -0.659214 1.160642
22 0.670757 -0.167252 -0.352765 0.189499 -1.232602 -0.168579 -1.559900 -1.850665 -1.416478 0.031846 0.308193 -0.956133 0.507231
23 0.639283 0.699164 0.621380 -0.725771 -0.890352 0.643955 -1.097228 0.229756 -0.091793 -2.390193 -0.825768 -2.164531 -0.772983
24 -0.907399 2.155157 0.873522 1.655111 0.871099 1.083262 -0.186962 0.373227 -0.354082 0.573586 0.733097 -0.986481 0.727511
25 -1.152272 1.601470 0.221927 1.296592 0.572807 0.581774 -0.479257 0.209504 -0.636178 0.574450 0.615706 -0.877894 0.941827
26 -0.676596 0.405600 0.553370 0.691531 -0.292469 0.626694 0.080413 0.246868 -0.100975 0.606694 -0.024154 -1.553730 -0.210884
27 -0.704834 -0.058170 0.609171 -0.735340 -0.512747 0.796018 -0.405976 0.502120 0.717380 -1.625431 0.825742 -1.663942 -0.379395
28 0.273095 1.014503 -0.772750 -0.065028 -0.513485 0.235377 -0.266144 1.373964 0.711880 -1.261758 1.106463 0.515863 0.555866
29 1.216372 0.637021 0.649194 0.099873 -0.816614 0.555439 -1.272918 -0.035862 0.154194 -1.797465 -0.177830 -1.702118 -1.136716
... ... ... ... ... ... ... ... ... ... ... ... ... ...
342 0.056741 0.169776 0.434163 -0.208821 0.498957 0.349198 0.547068 0.250228 -0.743894 -0.497075 0.373497 -0.025547 0.260645
343 -0.845812 -0.163165 0.268174 1.312135 1.241686 1.484484 1.279782 -0.350179 -0.266719 -0.170434 0.147000 -0.259175 -0.323251
344 0.056854 0.089458 -0.128149 0.123107 -0.879175 0.172486 0.919301 0.727007 -0.032509 -0.594358 -0.241536 -0.339538 -1.563800
345 1.322735 -0.970372 -1.058427 1.018282 -1.228871 0.835533 1.462831 -1.481872 -2.024441 0.388890 2.395768 -0.993539 0.301816
346 -0.159679 -0.200313 -0.181878 0.221536 -0.604018 0.554979 0.173592 -1.137738 -1.525377 -0.382164 1.156959 0.545188 -0.873936
347 -0.510690 -0.141874 -0.170690 -0.486309 -1.066447 -1.098392 -1.513393 -0.202811 0.062343 0.446348 -0.029988 -0.024432 -0.978036
348 0.441393 0.403987 0.538948 1.253198 -0.158511 0.497768 0.151471 -0.006025 0.213458 0.119760 -0.002312 0.139434 -0.401118
349 0.548477 0.987769 0.505748 0.779668 0.504327 -0.003400 0.200264 0.287803 0.084852 -0.044437 0.769553 0.169816 -0.581506
350 0.278851 -0.150632 1.015313 0.158731 -1.435466 -0.910636 1.526971 0.810376 -0.088268 2.273901 1.895682 -0.573207 1.173543
351 1.781784 -0.680962 -0.140043 1.730156 0.760657 1.081874 0.686370 -0.456141 -0.310319 0.443108 0.067726 -0.804283 0.268616
352 1.110023 -0.419764 -0.451242 1.471440 0.860531 0.858025 1.016472 0.013533 -0.532955 0.597255 -0.385255 -1.299309 0.869963
353 0.463780 0.094111 0.074193 0.457058 -0.494585 -0.741218 -1.615368 -0.323890 0.179301 -0.914854 -0.881275 -0.284568 0.516848
354 0.162857 1.300630 -0.374191 -0.148478 -0.275205 0.936621 -0.301931 0.926288 -0.242039 -1.217862 -0.849053 0.381655 1.521222
355 -0.261040 1.897992 0.324175 0.250461 -0.326921 0.078347 -0.794723 1.245895 0.561437 0.299601 0.612062 0.375109 0.668225
356 -2.412627 -0.912657 0.924859 1.091412 -0.430459 0.991776 0.577087 0.366311 0.916132 -0.010096 -0.337066 0.723121 0.634413
357 -1.610420 -0.171488 1.308910 1.557149 -0.783120 1.055891 0.070922 0.736289 0.651236 -0.209692 -0.293388 0.549580 0.947465
358 -1.627642 -0.225022 1.420291 1.585386 -0.623077 1.204209 0.203574 0.815228 0.701131 -0.111706 -0.352897 0.552444 1.038487
359 1.658650 0.261694 0.694273 -0.634006 -0.742717 -1.107684 -0.040641 0.685375 0.704374 0.457634 -0.012812 -0.227444 -0.311482
360 -0.472450 1.290735 1.251486 0.902820 1.064267 0.319911 0.273062 -0.004026 -0.730129 -0.487802 -0.590033 0.917054 0.316796
361 0.145973 1.078298 -0.110458 0.396705 0.465683 0.120005 0.324478 0.647014 0.406366 0.303529 0.342183 0.418467 -0.257006
362 1.354053 0.408020 -1.449365 -0.144038 0.735070 1.458916 -0.253049 0.476118 1.309448 1.981607 0.319930 -0.734588 -2.427842
363 3.546326 -0.337767 -0.983896 -3.155084 1.922015 3.128359 1.576092 2.767242 2.734920 1.749030 -1.432287 -5.486282 -3.776088
364 3.564797 -0.492960 -0.663172 -2.465245 2.044991 3.045697 1.746383 2.238430 2.806354 2.318786 -0.732814 -5.203217 -4.762769
365 -0.480041 0.390140 0.283493 0.710367 0.436247 0.787936 0.149057 1.081200 1.130496 0.783116 1.174331 0.987069 1.027523
366 1.601344 1.120977 0.942690 0.218542 1.432015 1.975393 1.352637 0.851851 0.707687 1.420656 -1.301018 0.996552 2.286308
367 -1.388425 0.554214 2.322455 0.125526 0.168411 1.459935 -0.011567 0.377516 1.801634 1.061665 1.432895 1.553952 0.335629
368 -0.560849 0.191976 -1.558597 -1.925355 0.006144 1.582531 2.334107 1.472221 0.788027 -0.066399 -0.539592 -0.089987 1.081026
369 -0.015724 0.095384 -0.050287 0.330265 -0.857518 -1.110181 0.457976 1.235890 0.937447 1.294180 1.196429 1.964913 0.788473
370 -0.320091 0.789370 -0.347116 -0.257819 -0.264866 0.119392 0.174557 -0.102622 -0.147261 0.927949 1.494185 0.617596 -0.119013
371 -1.397911 0.969347 -0.218602 0.165675 -1.187201 -1.022111 0.090487 -0.281353 -1.520146 -0.950575 -1.733689 -0.924046 -0.731033

372 rows × 13 columns

In [60]:
# Within-cluster sum of squares (KMeans inertia) for k = 1..14,
# collected for the elbow plot below. `fit` returns the estimator,
# so the fit-then-read can be done in a single comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_
        for k in range(1, 15)]
WSSs
Out[60]:
[4836.0,
 4367.616182778379,
 3974.0733975762073,
 3720.4477504020774,
 3549.9823424876267,
 3386.093163495573,
 3254.3815559758523,
 3136.90996141146,
 3048.6934734702136,
 2957.186911982338,
 2855.333306370868,
 2802.0220333671496,
 2712.5965714921504,
 2657.189981994876]
In [89]:
# Elbow plot: inertia versus number of clusters (k = 1..14).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[89]:
[<matplotlib.lines.Line2D at 0x244598a8630>]

K=3

In [81]:
# Final clustering with the chosen k = 3.
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[81]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [82]:
# Cluster assignment of each row used for fitting.
kmeans_mfcc.labels_
Out[82]:
array([1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,
       1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,
       1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0,
       1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
       0, 1, 1, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 0])
In [83]:
# predict() on the same data reproduces labels_ (outputs above are identical);
# kept as a variable so the assignments can be attached to X below.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[83]:
array([1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,
       1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1,
       1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0,
       1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
       0, 1, 1, 2, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 0])
In [84]:
# NOTE(review): this mutates the feature matrix X in place, appending the
# cluster label and the target; the groupby cell below relies on these two
# extra columns, so X is no longer a pure feature matrix after this point.
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [85]:
# X now carries the 13 MFCC features plus the 'Cluster' and 'chosen' columns.
X
Out[85]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.303210 1.253016 -0.394054 0.592598 0.106623 -2.083256 0.858313 0.801936 2.380580 -1.304258 0.556361 1.949530 -1.046692 1 0
1 0.647559 -1.553511 -1.648243 -1.010792 -0.857927 0.335856 0.884468 0.250703 0.298648 0.683922 1.599907 0.349480 -0.484103 1 0
2 2.266625 -0.333664 0.685765 -2.001143 -0.820018 -2.442242 -1.583451 -2.793213 -2.158376 -2.431307 -0.855856 -0.471626 -1.478884 0 0
3 0.672266 -0.397422 0.105106 -1.822060 -1.335294 -1.384110 -0.608579 -1.639581 -2.081721 -1.171877 -1.102493 -1.264328 -1.165166 0 0
4 0.747622 0.110331 -0.079109 -1.108698 -0.391749 -0.448919 0.133859 -0.843237 -1.924086 -0.222835 0.221819 0.017631 -0.725177 0 0
5 2.072441 -1.565884 -0.268750 -1.648648 -3.149219 -3.406801 -4.332461 -1.709990 -1.313696 -1.503431 -2.036749 -1.928213 -2.657430 0 0
6 -0.163654 0.470736 0.440011 0.594090 -1.227236 0.409218 0.608496 -0.948833 -2.051031 0.892327 0.371683 -0.318984 0.022251 0 0
7 0.477732 -0.217651 -0.908178 -1.257961 -1.360625 -0.551388 -0.599896 0.099066 1.299780 2.443060 0.417236 -0.919898 -0.916391 1 0
8 0.897786 -3.040013 0.311694 -0.386220 -0.321124 -0.221380 -0.207002 -0.127210 0.011073 0.211925 -0.400748 -0.296623 -0.143419 0 0
9 -0.977087 1.088438 -0.184899 -0.626934 0.577247 0.522552 1.150101 1.023214 0.136257 0.193237 -0.496760 0.367549 0.378679 1 0
10 0.328615 -1.292300 -1.398337 -0.677268 -1.070980 -0.075073 -0.740061 -0.424240 -0.216693 0.633892 -0.070397 0.960392 0.403827 0 0
11 -0.199470 0.110219 0.238637 0.455154 -0.116209 0.374450 0.078145 0.424005 0.633052 -0.153498 -0.647002 0.301135 -0.000406 1 0
12 -0.711256 0.124802 0.734425 -0.445078 -0.503247 -0.323539 0.236246 -0.572803 -0.221112 -0.206486 -0.180516 0.119335 0.027470 0 0
13 -0.806898 -0.126740 -0.383726 0.035489 -1.164460 -0.574335 -0.633858 -0.009812 -0.131411 0.549197 -0.257952 0.307916 0.814674 0 0
14 -0.077242 -0.331495 0.550493 -0.008575 -0.215759 -1.260552 -0.581296 0.369790 -0.684267 0.792489 -0.457321 -0.704205 -0.093986 0 0
15 0.244538 0.777957 0.464181 0.169574 -0.433604 -1.172185 -1.866928 0.759778 -0.372608 0.009766 0.964104 1.082661 -0.506505 0 0
16 0.602329 -0.035069 0.178352 -0.036690 0.180302 -0.769568 0.364535 0.996915 0.263984 -0.829872 -0.133422 0.601135 -1.217336 1 0
17 -0.570258 -0.759570 0.108993 0.657477 0.342355 -0.903388 0.112467 -0.669060 -0.661619 0.915675 1.620722 -0.160697 0.379275 0 0
18 -0.288268 -1.202534 -0.544058 0.295908 0.568680 -1.416228 0.423676 0.041836 -0.665694 0.699155 -0.070704 -0.429451 1.194384 0 0
19 -0.497305 -0.552590 0.332470 0.660607 0.293725 -0.945647 -1.269354 0.464095 1.166255 2.034233 2.037855 0.555927 0.423683 1 0
20 1.386141 -0.516432 -0.074640 0.751101 -1.151864 0.155819 -1.921431 -3.381158 -1.145758 -1.197084 0.654749 1.636425 0.993236 0 0
21 0.076772 0.072900 0.122544 0.799017 -1.121011 -0.137599 -1.150187 -1.669293 -1.110882 -0.047217 -0.034112 -0.659214 1.160642 0 0
22 0.670757 -0.167252 -0.352765 0.189499 -1.232602 -0.168579 -1.559900 -1.850665 -1.416478 0.031846 0.308193 -0.956133 0.507231 0 0
23 0.639283 0.699164 0.621380 -0.725771 -0.890352 0.643955 -1.097228 0.229756 -0.091793 -2.390193 -0.825768 -2.164531 -0.772983 1 0
24 -0.907399 2.155157 0.873522 1.655111 0.871099 1.083262 -0.186962 0.373227 -0.354082 0.573586 0.733097 -0.986481 0.727511 1 0
25 -1.152272 1.601470 0.221927 1.296592 0.572807 0.581774 -0.479257 0.209504 -0.636178 0.574450 0.615706 -0.877894 0.941827 1 0
26 -0.676596 0.405600 0.553370 0.691531 -0.292469 0.626694 0.080413 0.246868 -0.100975 0.606694 -0.024154 -1.553730 -0.210884 1 0
27 -0.704834 -0.058170 0.609171 -0.735340 -0.512747 0.796018 -0.405976 0.502120 0.717380 -1.625431 0.825742 -1.663942 -0.379395 1 0
28 0.273095 1.014503 -0.772750 -0.065028 -0.513485 0.235377 -0.266144 1.373964 0.711880 -1.261758 1.106463 0.515863 0.555866 1 0
29 1.216372 0.637021 0.649194 0.099873 -0.816614 0.555439 -1.272918 -0.035862 0.154194 -1.797465 -0.177830 -1.702118 -1.136716 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
342 0.056741 0.169776 0.434163 -0.208821 0.498957 0.349198 0.547068 0.250228 -0.743894 -0.497075 0.373497 -0.025547 0.260645 1 1
343 -0.845812 -0.163165 0.268174 1.312135 1.241686 1.484484 1.279782 -0.350179 -0.266719 -0.170434 0.147000 -0.259175 -0.323251 1 1
344 0.056854 0.089458 -0.128149 0.123107 -0.879175 0.172486 0.919301 0.727007 -0.032509 -0.594358 -0.241536 -0.339538 -1.563800 1 1
345 1.322735 -0.970372 -1.058427 1.018282 -1.228871 0.835533 1.462831 -1.481872 -2.024441 0.388890 2.395768 -0.993539 0.301816 0 1
346 -0.159679 -0.200313 -0.181878 0.221536 -0.604018 0.554979 0.173592 -1.137738 -1.525377 -0.382164 1.156959 0.545188 -0.873936 0 1
347 -0.510690 -0.141874 -0.170690 -0.486309 -1.066447 -1.098392 -1.513393 -0.202811 0.062343 0.446348 -0.029988 -0.024432 -0.978036 0 1
348 0.441393 0.403987 0.538948 1.253198 -0.158511 0.497768 0.151471 -0.006025 0.213458 0.119760 -0.002312 0.139434 -0.401118 1 1
349 0.548477 0.987769 0.505748 0.779668 0.504327 -0.003400 0.200264 0.287803 0.084852 -0.044437 0.769553 0.169816 -0.581506 1 1
350 0.278851 -0.150632 1.015313 0.158731 -1.435466 -0.910636 1.526971 0.810376 -0.088268 2.273901 1.895682 -0.573207 1.173543 1 1
351 1.781784 -0.680962 -0.140043 1.730156 0.760657 1.081874 0.686370 -0.456141 -0.310319 0.443108 0.067726 -0.804283 0.268616 1 1
352 1.110023 -0.419764 -0.451242 1.471440 0.860531 0.858025 1.016472 0.013533 -0.532955 0.597255 -0.385255 -1.299309 0.869963 1 1
353 0.463780 0.094111 0.074193 0.457058 -0.494585 -0.741218 -1.615368 -0.323890 0.179301 -0.914854 -0.881275 -0.284568 0.516848 0 1
354 0.162857 1.300630 -0.374191 -0.148478 -0.275205 0.936621 -0.301931 0.926288 -0.242039 -1.217862 -0.849053 0.381655 1.521222 1 1
355 -0.261040 1.897992 0.324175 0.250461 -0.326921 0.078347 -0.794723 1.245895 0.561437 0.299601 0.612062 0.375109 0.668225 1 1
356 -2.412627 -0.912657 0.924859 1.091412 -0.430459 0.991776 0.577087 0.366311 0.916132 -0.010096 -0.337066 0.723121 0.634413 1 1
357 -1.610420 -0.171488 1.308910 1.557149 -0.783120 1.055891 0.070922 0.736289 0.651236 -0.209692 -0.293388 0.549580 0.947465 1 1
358 -1.627642 -0.225022 1.420291 1.585386 -0.623077 1.204209 0.203574 0.815228 0.701131 -0.111706 -0.352897 0.552444 1.038487 1 1
359 1.658650 0.261694 0.694273 -0.634006 -0.742717 -1.107684 -0.040641 0.685375 0.704374 0.457634 -0.012812 -0.227444 -0.311482 1 1
360 -0.472450 1.290735 1.251486 0.902820 1.064267 0.319911 0.273062 -0.004026 -0.730129 -0.487802 -0.590033 0.917054 0.316796 1 1
361 0.145973 1.078298 -0.110458 0.396705 0.465683 0.120005 0.324478 0.647014 0.406366 0.303529 0.342183 0.418467 -0.257006 1 1
362 1.354053 0.408020 -1.449365 -0.144038 0.735070 1.458916 -0.253049 0.476118 1.309448 1.981607 0.319930 -0.734588 -2.427842 1 1
363 3.546326 -0.337767 -0.983896 -3.155084 1.922015 3.128359 1.576092 2.767242 2.734920 1.749030 -1.432287 -5.486282 -3.776088 2 1
364 3.564797 -0.492960 -0.663172 -2.465245 2.044991 3.045697 1.746383 2.238430 2.806354 2.318786 -0.732814 -5.203217 -4.762769 2 1
365 -0.480041 0.390140 0.283493 0.710367 0.436247 0.787936 0.149057 1.081200 1.130496 0.783116 1.174331 0.987069 1.027523 1 1
366 1.601344 1.120977 0.942690 0.218542 1.432015 1.975393 1.352637 0.851851 0.707687 1.420656 -1.301018 0.996552 2.286308 1 1
367 -1.388425 0.554214 2.322455 0.125526 0.168411 1.459935 -0.011567 0.377516 1.801634 1.061665 1.432895 1.553952 0.335629 1 1
368 -0.560849 0.191976 -1.558597 -1.925355 0.006144 1.582531 2.334107 1.472221 0.788027 -0.066399 -0.539592 -0.089987 1.081026 1 1
369 -0.015724 0.095384 -0.050287 0.330265 -0.857518 -1.110181 0.457976 1.235890 0.937447 1.294180 1.196429 1.964913 0.788473 1 1
370 -0.320091 0.789370 -0.347116 -0.257819 -0.264866 0.119392 0.174557 -0.102622 -0.147261 0.927949 1.494185 0.617596 -0.119013 1 1
371 -1.397911 0.969347 -0.218602 0.165675 -1.187201 -1.022111 0.090487 -0.281353 -1.520146 -0.950575 -1.733689 -0.924046 -0.731033 0 1

372 rows × 15 columns

In [86]:
# Count rows per (chosen, cluster) pair and plot a stacked bar per cluster.
# `values=0` selects the unnamed size column produced by reset_index().
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[86]:
<matplotlib.axes._subplots.AxesSubplot at 0x2445d307358>
In [27]:
# Render the company name as a markdown section header inside the notebook.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[1]))

Club De Banqueros y Empresarios

ANN

In [43]:
# Features for company 1: standardized MFCCs without the cluster column.
X = df_n_ps_std_mfcc[1].drop(columns='Cluster')
In [44]:
# Binary target: whether the song was chosen (0/1).
y = df_n_ps[1]['chosen']
In [45]:
# NOTE(review): no random_state on the split — not reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [46]:
X_train.shape
Out[46]:
(191, 13)
In [47]:
# Baseline MLP; its hyperparameters are re-tuned by the grid search below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [48]:
# Candidate hyperparameter grids for the MLP search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# One-, two- and three-layer architectures.
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),
    (10, 10), (20, 20), (30, 30), (20, 10),
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),
]
# 0.001 .. 0.009 in 0.001 steps, plus 0.01 and 0.02.
learning_rate_init_vec = [k / 1000 for k in range(1, 10)] + [0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [49]:
import time
start = time.time()  # wall-clock start, to report total search time after fitting

# Seed the global NumPy RNG so the stochastic parts of the search are repeatable.
np.random.seed(1234)

# Parameter grid explored exhaustively by GridSearchCV
# (3 activations x 11 iteration budgets x 11 architectures x 11 learning rates, 5 folds each).
parametros = {'activation': activation_vec,
              'max_iter': max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy per candidate; refit the winner on accuracy.
scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
# `iid=True` was deprecated in scikit-learn 0.22 and removed in 0.24; dropping
# it keeps this cell runnable on current versions (scores are then the plain
# per-fold average, which is also the old default behavior's replacement).
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [50]:
# Run the exhaustive search — this is the expensive cell (~25 min per the output below).
grid.fit(X_train, y_train)

# Report the winning hyperparameters with their cross-validated accuracy and kappa.
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time after the model search finished; `start` was set in the previous cell
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20, 20, 20), 'learning_rate_init': 0.01, 'max_iter': 2000}, que permiten obtener un Accuracy de 81.68% y un Kappa del 52.97
Tiempo total: 25.07 minutos
In [51]:
# Carry the grid-search winners over to the Keras model built in the next cells.
n0 = X_train.shape[1]  # input dimensionality (number of features)

### hidden_layer_sizes
# Hidden-layer widths from the best configuration, plus a final 1-unit layer
# for the binary classification head. (Replaces the original element-by-element
# copy loop with the direct list() conversion of the tuple.)
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']  # initial learning rate for the optimizer
epochs = grid.best_params_['max_iter']        # epoch budget reused as Keras epochs
In [52]:
input_tensor = Input(shape = (n0,))
In [53]:
# Build the hidden stack with the functional API: each Dense layer consumes
# the previous entry of hidden_outputs, starting from the input tensor.
# NOTE(review): the activation is hardcoded to 'tanh' here, but the grid
# search above selected 'relu' as best — confirm whether this is intentional.
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = 'tanh')(hidden_outputs[i]))
    
# Single sigmoid unit -> probability that the song is "chosen".
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [54]:
# Wrap the graph into a trainable Model and snapshot its freshly initialized
# weights, so later cells can restart training from the same starting point.
model = Model(inputs=[input_tensor], outputs=[classification_output])
weights = model.get_weights()
In [55]:
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_4 (Dense)              (None, 20)                280       
_________________________________________________________________
dense_5 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_6 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_7 (Dense)              (None, 1)                 21        
=================================================================
Total params: 1,141
Trainable params: 1,141
Non-trainable params: 0
_________________________________________________________________
In [56]:
# Restore the saved initial weights so this training cell is idempotent on re-run.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE: `lr` is the legacy keyword; newer Keras uses `learning_rate`
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the grid-search epoch budget; halve the learning rate whenever
# val_accuracy fails to improve by >= 0.01 for 10 consecutive epochs.
# NOTE(review): batch_size is fixed at 32 here rather than tuned from batch_size_vec.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/2000
191/191 [==============================] - 2s 10ms/step - loss: 0.6523 - accuracy: 0.5864 - val_loss: 0.5471 - val_accuracy: 0.7031
Epoch 2/2000
191/191 [==============================] - 0s 89us/step - loss: 0.4800 - accuracy: 0.7696 - val_loss: 0.5294 - val_accuracy: 0.7500
Epoch 3/2000
191/191 [==============================] - 0s 68us/step - loss: 0.4421 - accuracy: 0.8010 - val_loss: 0.5305 - val_accuracy: 0.7812
Epoch 4/2000
191/191 [==============================] - 0s 63us/step - loss: 0.4243 - accuracy: 0.8168 - val_loss: 0.5390 - val_accuracy: 0.7500
Epoch 5/2000
191/191 [==============================] - 0s 73us/step - loss: 0.4080 - accuracy: 0.8168 - val_loss: 0.5368 - val_accuracy: 0.7500
Epoch 6/2000
191/191 [==============================] - 0s 58us/step - loss: 0.3924 - accuracy: 0.8168 - val_loss: 0.5567 - val_accuracy: 0.7188
Epoch 7/2000
191/191 [==============================] - 0s 89us/step - loss: 0.3782 - accuracy: 0.8168 - val_loss: 0.5325 - val_accuracy: 0.7500
Epoch 8/2000
191/191 [==============================] - 0s 63us/step - loss: 0.3562 - accuracy: 0.8429 - val_loss: 0.5605 - val_accuracy: 0.7500
Epoch 9/2000
191/191 [==============================] - 0s 63us/step - loss: 0.3462 - accuracy: 0.8586 - val_loss: 0.5968 - val_accuracy: 0.7188
Epoch 10/2000
191/191 [==============================] - 0s 89us/step - loss: 0.3171 - accuracy: 0.8639 - val_loss: 0.5665 - val_accuracy: 0.7500
Epoch 11/2000
191/191 [==============================] - 0s 68us/step - loss: 0.3107 - accuracy: 0.8639 - val_loss: 0.5456 - val_accuracy: 0.7656
Epoch 12/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2855 - accuracy: 0.8743 - val_loss: 0.6107 - val_accuracy: 0.7500
Epoch 13/2000
191/191 [==============================] - 0s 58us/step - loss: 0.2626 - accuracy: 0.8848 - val_loss: 0.6076 - val_accuracy: 0.7656

Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 14/2000
191/191 [==============================] - 0s 94us/step - loss: 0.2396 - accuracy: 0.9215 - val_loss: 0.5932 - val_accuracy: 0.7656
Epoch 15/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2269 - accuracy: 0.9162 - val_loss: 0.5980 - val_accuracy: 0.7812
Epoch 16/2000
191/191 [==============================] - 0s 63us/step - loss: 0.2133 - accuracy: 0.9372 - val_loss: 0.6289 - val_accuracy: 0.7656
Epoch 17/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2023 - accuracy: 0.9372 - val_loss: 0.6384 - val_accuracy: 0.7656
Epoch 18/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1904 - accuracy: 0.9424 - val_loss: 0.6197 - val_accuracy: 0.7656
Epoch 19/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1817 - accuracy: 0.9372 - val_loss: 0.6255 - val_accuracy: 0.7656
Epoch 20/2000
191/191 [==============================] - 0s 68us/step - loss: 0.1725 - accuracy: 0.9424 - val_loss: 0.6415 - val_accuracy: 0.7500
Epoch 21/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1603 - accuracy: 0.9424 - val_loss: 0.6400 - val_accuracy: 0.7656
Epoch 22/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1473 - accuracy: 0.9581 - val_loss: 0.6420 - val_accuracy: 0.7500
Epoch 23/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1367 - accuracy: 0.9738 - val_loss: 0.6448 - val_accuracy: 0.7656

Epoch 00023: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 24/2000
191/191 [==============================] - 0s 68us/step - loss: 0.1248 - accuracy: 0.9791 - val_loss: 0.6578 - val_accuracy: 0.7812
Epoch 25/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1211 - accuracy: 0.9738 - val_loss: 0.6633 - val_accuracy: 0.7812
Epoch 26/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1166 - accuracy: 0.9791 - val_loss: 0.6651 - val_accuracy: 0.7656
Epoch 27/2000
191/191 [==============================] - 0s 110us/step - loss: 0.1121 - accuracy: 0.9791 - val_loss: 0.6655 - val_accuracy: 0.7500
Epoch 28/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1069 - accuracy: 0.9791 - val_loss: 0.6765 - val_accuracy: 0.7656
Epoch 29/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1026 - accuracy: 0.9791 - val_loss: 0.6865 - val_accuracy: 0.7656
Epoch 30/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0981 - accuracy: 0.9843 - val_loss: 0.6846 - val_accuracy: 0.7656
Epoch 31/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0922 - accuracy: 0.9843 - val_loss: 0.6932 - val_accuracy: 0.7656
Epoch 32/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0883 - accuracy: 0.9843 - val_loss: 0.7066 - val_accuracy: 0.7656
Epoch 33/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0829 - accuracy: 0.9895 - val_loss: 0.7111 - val_accuracy: 0.7656

Epoch 00033: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 34/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0790 - accuracy: 0.9895 - val_loss: 0.7178 - val_accuracy: 0.7656
Epoch 35/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0767 - accuracy: 0.9895 - val_loss: 0.7203 - val_accuracy: 0.7656
Epoch 36/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0752 - accuracy: 0.9895 - val_loss: 0.7267 - val_accuracy: 0.7656
Epoch 37/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0729 - accuracy: 0.9895 - val_loss: 0.7269 - val_accuracy: 0.7656
Epoch 38/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0705 - accuracy: 0.9895 - val_loss: 0.7364 - val_accuracy: 0.7656
Epoch 39/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0686 - accuracy: 0.9895 - val_loss: 0.7434 - val_accuracy: 0.7656
Epoch 40/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0668 - accuracy: 0.9895 - val_loss: 0.7461 - val_accuracy: 0.7812
Epoch 41/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0648 - accuracy: 0.9895 - val_loss: 0.7476 - val_accuracy: 0.7812
Epoch 42/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0631 - accuracy: 0.9895 - val_loss: 0.7577 - val_accuracy: 0.7812
Epoch 43/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0610 - accuracy: 0.9895 - val_loss: 0.7680 - val_accuracy: 0.7812

Epoch 00043: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 44/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0592 - accuracy: 0.9895 - val_loss: 0.7672 - val_accuracy: 0.7812
Epoch 45/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0582 - accuracy: 0.9895 - val_loss: 0.7674 - val_accuracy: 0.7812
Epoch 46/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0576 - accuracy: 0.9895 - val_loss: 0.7688 - val_accuracy: 0.7812
Epoch 47/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0566 - accuracy: 0.9895 - val_loss: 0.7700 - val_accuracy: 0.7812
Epoch 48/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0557 - accuracy: 0.9895 - val_loss: 0.7748 - val_accuracy: 0.7812
Epoch 49/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0547 - accuracy: 0.9895 - val_loss: 0.7777 - val_accuracy: 0.7812
Epoch 50/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0540 - accuracy: 0.9948 - val_loss: 0.7821 - val_accuracy: 0.7812
Epoch 51/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0533 - accuracy: 0.9948 - val_loss: 0.7849 - val_accuracy: 0.7812
Epoch 52/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0525 - accuracy: 0.9948 - val_loss: 0.7842 - val_accuracy: 0.7812
Epoch 53/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0515 - accuracy: 0.9948 - val_loss: 0.7869 - val_accuracy: 0.7812

Epoch 00053: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 54/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0509 - accuracy: 0.9948 - val_loss: 0.7898 - val_accuracy: 0.7812
Epoch 55/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0504 - accuracy: 0.9948 - val_loss: 0.7899 - val_accuracy: 0.7812
Epoch 56/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0500 - accuracy: 0.9948 - val_loss: 0.7926 - val_accuracy: 0.7812
Epoch 57/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0496 - accuracy: 0.9948 - val_loss: 0.7942 - val_accuracy: 0.7812
Epoch 58/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0492 - accuracy: 0.9948 - val_loss: 0.7951 - val_accuracy: 0.7812
Epoch 59/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0487 - accuracy: 0.9948 - val_loss: 0.7963 - val_accuracy: 0.7812
Epoch 60/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0484 - accuracy: 0.9948 - val_loss: 0.7975 - val_accuracy: 0.7812
Epoch 61/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0480 - accuracy: 0.9948 - val_loss: 0.7996 - val_accuracy: 0.7812
Epoch 62/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0476 - accuracy: 0.9948 - val_loss: 0.8015 - val_accuracy: 0.7812
Epoch 63/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0472 - accuracy: 0.9948 - val_loss: 0.8032 - val_accuracy: 0.7656

Epoch 00063: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 64/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0468 - accuracy: 0.9948 - val_loss: 0.8047 - val_accuracy: 0.7656
Epoch 65/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0466 - accuracy: 0.9948 - val_loss: 0.8048 - val_accuracy: 0.7656
Epoch 66/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0464 - accuracy: 0.9948 - val_loss: 0.8054 - val_accuracy: 0.7656
Epoch 67/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0462 - accuracy: 0.9948 - val_loss: 0.8063 - val_accuracy: 0.7656
Epoch 68/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0461 - accuracy: 0.9948 - val_loss: 0.8067 - val_accuracy: 0.7656
Epoch 69/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0459 - accuracy: 0.9948 - val_loss: 0.8076 - val_accuracy: 0.7656
Epoch 70/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0456 - accuracy: 0.9948 - val_loss: 0.8089 - val_accuracy: 0.7656
Epoch 71/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0455 - accuracy: 0.9948 - val_loss: 0.8099 - val_accuracy: 0.7656
Epoch 72/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0452 - accuracy: 0.9948 - val_loss: 0.8107 - val_accuracy: 0.7656
Epoch 73/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0451 - accuracy: 0.9948 - val_loss: 0.8106 - val_accuracy: 0.7656

Epoch 00073: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 74/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0449 - accuracy: 0.9948 - val_loss: 0.8107 - val_accuracy: 0.7656
Epoch 75/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0448 - accuracy: 0.9948 - val_loss: 0.8113 - val_accuracy: 0.7656
Epoch 76/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0447 - accuracy: 0.9948 - val_loss: 0.8118 - val_accuracy: 0.7656
Epoch 77/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0446 - accuracy: 0.9948 - val_loss: 0.8122 - val_accuracy: 0.7656
Epoch 78/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0445 - accuracy: 0.9948 - val_loss: 0.8128 - val_accuracy: 0.7656
Epoch 79/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0444 - accuracy: 0.9948 - val_loss: 0.8131 - val_accuracy: 0.7656
Epoch 80/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0443 - accuracy: 0.9948 - val_loss: 0.8137 - val_accuracy: 0.7656
Epoch 81/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0442 - accuracy: 0.9948 - val_loss: 0.8142 - val_accuracy: 0.7656
Epoch 82/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0441 - accuracy: 0.9948 - val_loss: 0.8146 - val_accuracy: 0.7656
Epoch 83/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0440 - accuracy: 0.9948 - val_loss: 0.8150 - val_accuracy: 0.7656

Epoch 00083: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 84/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0439 - accuracy: 0.9948 - val_loss: 0.8152 - val_accuracy: 0.7656
Epoch 85/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0439 - accuracy: 0.9948 - val_loss: 0.8155 - val_accuracy: 0.7656
Epoch 86/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0438 - accuracy: 0.9948 - val_loss: 0.8155 - val_accuracy: 0.7656
Epoch 87/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0438 - accuracy: 0.9948 - val_loss: 0.8159 - val_accuracy: 0.7656
Epoch 88/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0437 - accuracy: 0.9948 - val_loss: 0.8161 - val_accuracy: 0.7656
Epoch 89/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0437 - accuracy: 0.9948 - val_loss: 0.8162 - val_accuracy: 0.7656
Epoch 90/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0436 - accuracy: 0.9948 - val_loss: 0.8164 - val_accuracy: 0.7656
Epoch 91/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0436 - accuracy: 0.9948 - val_loss: 0.8166 - val_accuracy: 0.7656
Epoch 92/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0435 - accuracy: 0.9948 - val_loss: 0.8170 - val_accuracy: 0.7656
Epoch 93/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0435 - accuracy: 0.9948 - val_loss: 0.8171 - val_accuracy: 0.7656

Epoch 00093: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 94/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8172 - val_accuracy: 0.7656
Epoch 95/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8174 - val_accuracy: 0.7656
Epoch 96/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8175 - val_accuracy: 0.7656
Epoch 97/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8174 - val_accuracy: 0.7656
Epoch 98/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8176 - val_accuracy: 0.7656
Epoch 99/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8177 - val_accuracy: 0.7656
Epoch 100/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8179 - val_accuracy: 0.7656
Epoch 101/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8180 - val_accuracy: 0.7656
Epoch 102/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8180 - val_accuracy: 0.7656
Epoch 103/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8182 - val_accuracy: 0.7656

Epoch 00103: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 104/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8182 - val_accuracy: 0.7656
Epoch 105/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8183 - val_accuracy: 0.7656
Epoch 106/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8183 - val_accuracy: 0.7656
Epoch 107/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8184 - val_accuracy: 0.7656
Epoch 108/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8185 - val_accuracy: 0.7656
Epoch 109/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8185 - val_accuracy: 0.7656
Epoch 110/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8186 - val_accuracy: 0.7656
Epoch 111/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8186 - val_accuracy: 0.7656
Epoch 112/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8187 - val_accuracy: 0.7656
Epoch 113/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8187 - val_accuracy: 0.7656

Epoch 00113: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 114/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 115/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 116/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 117/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 118/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 119/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 120/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 121/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 122/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 123/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656

Epoch 00123: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 124/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 125/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 126/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 127/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 128/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 129/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 130/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 131/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 132/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 133/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656

Epoch 00133: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 134/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 135/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 136/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 137/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 138/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 139/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 140/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 141/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 142/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 143/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656

Epoch 00143: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 144/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 145/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 146/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 147/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 148/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 149/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 150/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 151/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 152/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 153/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00153: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 154/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 155/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 156/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 157/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 158/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 159/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 160/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 161/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 162/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 163/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00163: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 164/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 165/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 166/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 167/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 168/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 169/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 170/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 171/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 172/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 173/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00173: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 174/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 175/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 176/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 177/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 178/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 179/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 180/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 181/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 182/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 183/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00183: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 184/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 185/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 186/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 187/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 188/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 189/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 190/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 191/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 192/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 193/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00193: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 194/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 195/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 196/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 197/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 198/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 199/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 200/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 201/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 202/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 203/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00203: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 204/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 205/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 206/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 207/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 208/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 209/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 210/2000
191/191 [==============================] - ETA: 0s - loss: 0.0166 - accuracy: 1.00 - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 211/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 212/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 213/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00213: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 214/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 215/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 216/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 217/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 218/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 219/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 220/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 221/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 222/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 223/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00223: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 224/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 225/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 226/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 227/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 228/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 229/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 230/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 231/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 232/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 233/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00233: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 234/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 235/2000
191/191 [==============================] - ETA: 0s - loss: 0.0224 - accuracy: 1.00 - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 236/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 237/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 238/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 239/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 240/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 241/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 242/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 243/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00243: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 244/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 245/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 246/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 247/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 248/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 249/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 250/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 251/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 252/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 253/2000
191/191 [==============================] - ETA: 0s - loss: 0.0256 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00253: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 254/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 255/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 256/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 257/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 258/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 259/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 260/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 261/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 262/2000
191/191 [==============================] - ETA: 0s - loss: 0.0223 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 263/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00263: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 264/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 265/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 266/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 267/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 268/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 269/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 270/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 271/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 272/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 273/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00273: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 274/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 275/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 276/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 277/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 278/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 279/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 280/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 281/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 282/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 283/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00283: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 284/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 285/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 286/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 287/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 288/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 289/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 290/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 291/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 292/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 293/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00293: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 294/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 295/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 296/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 297/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 298/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 299/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 300/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 301/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 302/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 303/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00303: ReduceLROnPlateau reducing learning rate to 9.313225537987968e-12.
Epoch 304/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 305/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 306/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 307/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 308/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 309/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 310/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 311/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 312/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 313/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00313: ReduceLROnPlateau reducing learning rate to 4.656612768993984e-12.
Epoch 314/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 315/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 316/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 317/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 318/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 319/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 320/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 321/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 322/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 323/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00323: ReduceLROnPlateau reducing learning rate to 2.328306384496992e-12.
Epoch 324/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 325/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 326/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 327/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 328/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 329/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 330/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 331/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 332/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 333/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00333: ReduceLROnPlateau reducing learning rate to 1.164153192248496e-12.
Epoch 334/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 335/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 336/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 337/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 338/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 339/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 340/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 341/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 342/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 343/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00343: ReduceLROnPlateau reducing learning rate to 5.82076596124248e-13.
Epoch 344/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 345/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 346/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 347/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 348/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 349/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 350/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 351/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 352/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 353/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00353: ReduceLROnPlateau reducing learning rate to 2.91038298062124e-13.
Epoch 354/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 355/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 356/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 357/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 358/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 359/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 360/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 361/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 362/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 363/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00363: ReduceLROnPlateau reducing learning rate to 1.45519149031062e-13.
Epoch 364/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 365/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 366/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 367/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 368/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 369/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 370/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 371/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 372/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 373/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00373: ReduceLROnPlateau reducing learning rate to 7.2759574515531e-14.
Epoch 374/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 375/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 376/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 377/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 378/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 379/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 380/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 381/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 382/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 383/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00383: ReduceLROnPlateau reducing learning rate to 3.63797872577655e-14.
Epoch 384/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 385/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 386/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 387/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 388/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 389/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 390/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 391/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 392/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 393/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00393: ReduceLROnPlateau reducing learning rate to 1.818989362888275e-14.
Epoch 394/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 395/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 396/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 397/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 398/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 399/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 400/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 401/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 402/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 403/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00403: ReduceLROnPlateau reducing learning rate to 9.094946814441375e-15.
Epoch 404/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 405/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 406/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 407/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 408/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 409/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 410/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 411/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 412/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 413/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00413: ReduceLROnPlateau reducing learning rate to 4.5474734072206875e-15.
Epoch 414/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 415/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 416/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 417/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 418/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 419/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 420/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 421/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 422/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 423/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00423: ReduceLROnPlateau reducing learning rate to 2.2737367036103438e-15.
Epoch 424/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 425/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 426/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 427/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 428/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 429/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 430/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 431/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 432/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 433/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00433: ReduceLROnPlateau reducing learning rate to 1.1368683518051719e-15.
Epoch 434/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 435/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 436/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 437/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 438/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 439/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 440/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 441/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 442/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 443/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00443: ReduceLROnPlateau reducing learning rate to 5.684341759025859e-16.
Epoch 444/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 445/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 446/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 447/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 448/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 449/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 450/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 451/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 452/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 453/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00453: ReduceLROnPlateau reducing learning rate to 2.8421708795129297e-16.
Epoch 454/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 455/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 456/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 457/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 458/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 459/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 460/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 461/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 462/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 463/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00463: ReduceLROnPlateau reducing learning rate to 1.4210854397564648e-16.
Epoch 464/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 465/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 466/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 467/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 468/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 469/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 470/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 471/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 472/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 473/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00473: ReduceLROnPlateau reducing learning rate to 7.105427198782324e-17.
Epoch 474/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 475/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 476/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 477/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 478/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 479/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 480/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 481/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 482/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 483/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00483: ReduceLROnPlateau reducing learning rate to 3.552713599391162e-17.
Epoch 484/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 485/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 486/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 487/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 488/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 489/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 490/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 491/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 492/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 493/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00493: ReduceLROnPlateau reducing learning rate to 1.776356799695581e-17.
Epoch 494/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 495/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 496/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 497/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 498/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 499/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 500/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 501/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 502/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 503/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00503: ReduceLROnPlateau reducing learning rate to 8.881783998477905e-18.
Epoch 504/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 505/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 506/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 507/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 508/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 509/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 510/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 511/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 512/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 513/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00513: ReduceLROnPlateau reducing learning rate to 4.440891999238953e-18.
Epoch 514/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 515/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 516/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 517/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 518/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 519/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 520/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 521/2000
191/191 [==============================] - 0s 141us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 522/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 523/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00523: ReduceLROnPlateau reducing learning rate to 2.2204459996194763e-18.
Epoch 524/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 525/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 526/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 527/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 528/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 529/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 530/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 531/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 532/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 533/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00533: ReduceLROnPlateau reducing learning rate to 1.1102229998097382e-18.
Epoch 534/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 535/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 536/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 537/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 538/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 539/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 540/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 541/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 542/2000
191/191 [==============================] - ETA: 0s - loss: 0.0396 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 543/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00543: ReduceLROnPlateau reducing learning rate to 5.551114999048691e-19.
Epoch 544/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 545/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 546/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 547/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 548/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 549/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 550/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 551/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 552/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 553/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00553: ReduceLROnPlateau reducing learning rate to 2.7755574995243454e-19.
Epoch 554/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 555/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 556/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 557/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 558/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 559/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 560/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 561/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 562/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 563/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00563: ReduceLROnPlateau reducing learning rate to 1.3877787497621727e-19.
Epoch 564/2000
191/191 [==============================] - 0s 303us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 565/2000
191/191 [==============================] - 0s 288us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 566/2000
191/191 [==============================] - 0s 194us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 567/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 568/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 569/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 570/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 571/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 572/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 573/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00573: ReduceLROnPlateau reducing learning rate to 6.938893748810864e-20.
Epoch 574/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 575/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 576/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 577/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 578/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 579/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 580/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 581/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 582/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 583/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00583: ReduceLROnPlateau reducing learning rate to 3.469446874405432e-20.
Epoch 584/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 585/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 586/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 587/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 588/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 589/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 590/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 591/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 592/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 593/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00593: ReduceLROnPlateau reducing learning rate to 1.734723437202716e-20.
Epoch 594/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 595/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 596/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 597/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 598/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 599/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 600/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 601/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 602/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 603/2000
191/191 [==============================] - 0s 173us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00603: ReduceLROnPlateau reducing learning rate to 8.67361718601358e-21.
Epoch 604/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 605/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 606/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 607/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 608/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 609/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 610/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 611/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 612/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 613/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00613: ReduceLROnPlateau reducing learning rate to 4.33680859300679e-21.
Epoch 614/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 615/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 616/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 617/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 618/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 619/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 620/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 621/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 622/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 623/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00623: ReduceLROnPlateau reducing learning rate to 2.168404296503395e-21.
Epoch 624/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 625/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 626/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 627/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 628/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 629/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 630/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 631/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 632/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 633/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00633: ReduceLROnPlateau reducing learning rate to 1.0842021482516974e-21.
Epoch 634/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 635/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 636/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 637/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 638/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 639/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 640/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 641/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 642/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 643/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00643: ReduceLROnPlateau reducing learning rate to 5.421010741258487e-22.
Epoch 644/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 645/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 646/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 647/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 648/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 649/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 650/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 651/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 652/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 653/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00653: ReduceLROnPlateau reducing learning rate to 2.7105053706292436e-22.
Epoch 654/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 655/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 656/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 657/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 658/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 659/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 660/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 661/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 662/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 663/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00663: ReduceLROnPlateau reducing learning rate to 1.3552526853146218e-22.
Epoch 664/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 665/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 666/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 667/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 668/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 669/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 670/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 671/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 672/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 673/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00673: ReduceLROnPlateau reducing learning rate to 6.776263426573109e-23.
Epoch 674/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 675/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 676/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 677/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 678/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 679/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 680/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 681/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 682/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 683/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00683: ReduceLROnPlateau reducing learning rate to 3.3881317132865545e-23.
Epoch 684/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 685/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 686/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 687/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 688/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 689/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 690/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 691/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 692/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 693/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00693: ReduceLROnPlateau reducing learning rate to 1.6940658566432772e-23.
Epoch 694/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 695/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 696/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 697/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 698/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 699/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 700/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 701/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 702/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 703/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00703: ReduceLROnPlateau reducing learning rate to 8.470329283216386e-24.
Epoch 704/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 705/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 706/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 707/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 708/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 709/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 710/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 711/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 712/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 713/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00713: ReduceLROnPlateau reducing learning rate to 4.235164641608193e-24.
Epoch 714/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 715/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 716/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 717/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 718/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 719/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 720/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 721/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 722/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 723/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00723: ReduceLROnPlateau reducing learning rate to 2.1175823208040965e-24.
Epoch 724/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 725/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 726/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 727/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 728/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 729/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 730/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 731/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 732/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 733/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00733: ReduceLROnPlateau reducing learning rate to 1.0587911604020483e-24.
Epoch 734/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 735/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 736/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 737/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 738/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 739/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 740/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 741/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 742/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 743/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00743: ReduceLROnPlateau reducing learning rate to 5.293955802010241e-25.
Epoch 744/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 745/2000
191/191 [==============================] - ETA: 0s - loss: 0.0317 - accuracy: 1.00 - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 746/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 747/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 748/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 749/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 750/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 751/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 752/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 753/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00753: ReduceLROnPlateau reducing learning rate to 2.6469779010051207e-25.
Epoch 754/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 755/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 756/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 757/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 758/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 759/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 760/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 761/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 762/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 763/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00763: ReduceLROnPlateau reducing learning rate to 1.3234889505025603e-25.
Epoch 764/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 765/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 766/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 767/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 768/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 769/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 770/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 771/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 772/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 773/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00773: ReduceLROnPlateau reducing learning rate to 6.617444752512802e-26.
Epoch 774/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 775/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 776/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 777/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 778/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 779/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 780/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 781/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 782/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 783/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00783: ReduceLROnPlateau reducing learning rate to 3.308722376256401e-26.
Epoch 784/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 785/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 786/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 787/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 788/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 789/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 790/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 791/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 792/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 793/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00793: ReduceLROnPlateau reducing learning rate to 1.6543611881282004e-26.
Epoch 794/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 795/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 796/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 797/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 798/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 799/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 800/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 801/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 802/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 803/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00803: ReduceLROnPlateau reducing learning rate to 8.271805940641002e-27.
Epoch 804/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 805/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 806/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 807/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 808/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 809/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 810/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 811/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 812/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 813/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00813: ReduceLROnPlateau reducing learning rate to 4.135902970320501e-27.
Epoch 814/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 815/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 816/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 817/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 818/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 819/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 820/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 821/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 822/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 823/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00823: ReduceLROnPlateau reducing learning rate to 2.0679514851602505e-27.
Epoch 824/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 825/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 826/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 827/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 828/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 829/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 830/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 831/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 832/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 833/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00833: ReduceLROnPlateau reducing learning rate to 1.0339757425801253e-27.
Epoch 834/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 835/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 836/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 837/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 838/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 839/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 840/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 841/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 842/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 843/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00843: ReduceLROnPlateau reducing learning rate to 5.169878712900626e-28.
Epoch 844/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 845/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 846/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 847/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 848/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 849/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 850/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 851/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 852/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 853/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00853: ReduceLROnPlateau reducing learning rate to 2.584939356450313e-28.
Epoch 854/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 855/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 856/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 857/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 858/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 859/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 860/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 861/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 862/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 863/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00863: ReduceLROnPlateau reducing learning rate to 1.2924696782251566e-28.
Epoch 864/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 865/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 866/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 867/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 868/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 869/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 870/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 871/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 872/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 873/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00873: ReduceLROnPlateau reducing learning rate to 6.462348391125783e-29.
Epoch 874/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 875/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 876/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 877/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 878/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 879/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 880/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 881/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 882/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 883/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00883: ReduceLROnPlateau reducing learning rate to 3.2311741955628914e-29.
Epoch 884/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 885/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 886/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 887/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 888/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 889/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 890/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 891/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 892/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 893/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00893: ReduceLROnPlateau reducing learning rate to 1.6155870977814457e-29.
Epoch 894/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 895/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 896/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 897/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 898/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 899/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 900/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 901/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 902/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 903/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00903: ReduceLROnPlateau reducing learning rate to 8.077935488907229e-30.
Epoch 904/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 905/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 906/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 907/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 908/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 909/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 910/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 911/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 912/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 913/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00913: ReduceLROnPlateau reducing learning rate to 4.038967744453614e-30.
Epoch 914/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 915/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 916/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 917/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 918/2000
191/191 [==============================] - ETA: 0s - loss: 0.0296 - accuracy: 1.00 - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 919/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 920/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 921/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 922/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 923/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00923: ReduceLROnPlateau reducing learning rate to 2.019483872226807e-30.
Epoch 924/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 925/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 926/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 927/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 928/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 929/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 930/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 931/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 932/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 933/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00933: ReduceLROnPlateau reducing learning rate to 1.0097419361134036e-30.
Epoch 934/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 935/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 936/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 937/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 938/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 939/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 940/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 941/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 942/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 943/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00943: ReduceLROnPlateau reducing learning rate to 5.048709680567018e-31.
Epoch 944/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 945/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 946/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 947/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 948/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 949/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 950/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 951/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 952/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 953/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00953: ReduceLROnPlateau reducing learning rate to 2.524354840283509e-31.
Epoch 954/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 955/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 956/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 957/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 958/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 959/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 960/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 961/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 962/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 963/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00963: ReduceLROnPlateau reducing learning rate to 1.2621774201417545e-31.
Epoch 964/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 965/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 966/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 967/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 968/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 969/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 970/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 971/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 972/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 973/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00973: ReduceLROnPlateau reducing learning rate to 6.310887100708772e-32.
Epoch 974/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 975/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 976/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 977/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 978/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 979/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 980/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 981/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 982/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 983/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00983: ReduceLROnPlateau reducing learning rate to 3.155443550354386e-32.
Epoch 984/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 985/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 986/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 987/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 988/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 989/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 990/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 991/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 992/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 993/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00993: ReduceLROnPlateau reducing learning rate to 1.577721775177193e-32.
Epoch 994/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 995/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 996/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 997/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 998/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 999/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1000/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1001/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1002/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1003/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01003: ReduceLROnPlateau reducing learning rate to 7.888608875885965e-33.
Epoch 1004/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1005/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1006/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1007/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1008/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1009/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1010/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1011/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1012/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1013/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01013: ReduceLROnPlateau reducing learning rate to 3.944304437942983e-33.
Epoch 1014/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1015/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1016/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1017/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1018/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1019/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1020/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1021/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1022/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1023/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01023: ReduceLROnPlateau reducing learning rate to 1.9721522189714914e-33.
Epoch 1024/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1025/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1026/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1027/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1028/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1029/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1030/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1031/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1032/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1033/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01033: ReduceLROnPlateau reducing learning rate to 9.860761094857457e-34.
Epoch 1034/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1035/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1036/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1037/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1038/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1039/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1040/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1041/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1042/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1043/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01043: ReduceLROnPlateau reducing learning rate to 4.930380547428728e-34.
Epoch 1044/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1045/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1046/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1047/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1048/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1049/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1050/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1051/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1052/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1053/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01053: ReduceLROnPlateau reducing learning rate to 2.465190273714364e-34.
Epoch 1054/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1055/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1056/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1057/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1058/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1059/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1060/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1061/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1062/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1063/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01063: ReduceLROnPlateau reducing learning rate to 1.232595136857182e-34.
Epoch 1064/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1065/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1066/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1067/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1068/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1069/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1070/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1071/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1072/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1073/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01073: ReduceLROnPlateau reducing learning rate to 6.16297568428591e-35.
Epoch 1074/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1075/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1076/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1077/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1078/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1079/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1080/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1081/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1082/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1083/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01083: ReduceLROnPlateau reducing learning rate to 3.081487842142955e-35.
Epoch 1084/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1085/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1086/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1087/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1088/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1089/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1090/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1091/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1092/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1093/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01093: ReduceLROnPlateau reducing learning rate to 1.5407439210714776e-35.
Epoch 1094/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1095/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1096/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1097/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1098/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1099/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1100/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1101/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1102/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1103/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01103: ReduceLROnPlateau reducing learning rate to 7.703719605357388e-36.
Epoch 1104/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1105/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1106/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1107/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1108/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1109/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1110/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1111/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1112/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1113/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01113: ReduceLROnPlateau reducing learning rate to 3.851859802678694e-36.
Epoch 1114/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1115/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1116/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1117/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1118/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1119/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1120/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1121/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1122/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1123/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01123: ReduceLROnPlateau reducing learning rate to 1.925929901339347e-36.
Epoch 1124/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1125/2000
191/191 [==============================] - ETA: 0s - loss: 0.0433 - accuracy: 1.00 - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1126/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1127/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1128/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1129/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1130/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1131/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1132/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1133/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01133: ReduceLROnPlateau reducing learning rate to 9.629649506696735e-37.
Epoch 1134/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1135/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1136/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1137/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1138/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1139/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1140/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1141/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1142/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1143/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01143: ReduceLROnPlateau reducing learning rate to 4.8148247533483676e-37.
Epoch 1144/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1145/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1146/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1147/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1148/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1149/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1150/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1151/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1152/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1153/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01153: ReduceLROnPlateau reducing learning rate to 2.4074123766741838e-37.
Epoch 1154/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1155/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1156/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1157/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1158/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1159/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1160/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1161/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1162/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1163/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01163: ReduceLROnPlateau reducing learning rate to 1.2037061883370919e-37.
Epoch 1164/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1165/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1166/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1167/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1168/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1169/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1170/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1171/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1172/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1173/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01173: ReduceLROnPlateau reducing learning rate to 6.018530941685459e-38.
Epoch 1174/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1175/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1176/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1177/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1178/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1179/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1180/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1181/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1182/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1183/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01183: ReduceLROnPlateau reducing learning rate to 3.0092654708427297e-38.
Epoch 1184/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1185/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1186/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1187/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1188/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1189/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1190/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1191/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1192/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1193/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01193: ReduceLROnPlateau reducing learning rate to 1.5046327354213649e-38.
Epoch 1194/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1195/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1196/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1197/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1198/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1199/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1200/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1201/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1202/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1203/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01203: ReduceLROnPlateau reducing learning rate to 7.523163677106824e-39.
Epoch 1204/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1205/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1206/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1207/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1208/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1209/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1210/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1211/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1212/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1213/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01213: ReduceLROnPlateau reducing learning rate to 3.761581838553412e-39.
Epoch 1214/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1215/2000
191/191 [==============================] - ETA: 0s - loss: 0.0311 - accuracy: 1.00 - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1216/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1217/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1218/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1219/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1220/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1221/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1222/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1223/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01223: ReduceLROnPlateau reducing learning rate to 1.88079056895209e-39.
Epoch 1224/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1225/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1226/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1227/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1228/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1229/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1230/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1231/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1232/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1233/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01233: ReduceLROnPlateau reducing learning rate to 9.40395284476045e-40.
Epoch 1234/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1235/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1236/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1237/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1238/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1239/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1240/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1241/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1242/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1243/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01243: ReduceLROnPlateau reducing learning rate to 4.701972919134064e-40.
Epoch 1244/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1245/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1246/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1247/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1248/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1249/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1250/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1251/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1252/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1253/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01253: ReduceLROnPlateau reducing learning rate to 2.350986459567032e-40.
Epoch 1254/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1255/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1256/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1257/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1258/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1259/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1260/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1261/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1262/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1263/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01263: ReduceLROnPlateau reducing learning rate to 1.175493229783516e-40.
Epoch 1264/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1265/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1266/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1267/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1268/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1269/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1270/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1271/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1272/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1273/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01273: ReduceLROnPlateau reducing learning rate to 5.87746614891758e-41.
Epoch 1274/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1275/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1276/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1277/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1278/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1279/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1280/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1281/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1282/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1283/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01283: ReduceLROnPlateau reducing learning rate to 2.93873307445879e-41.
Epoch 1284/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1285/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1286/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1287/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1288/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1289/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1290/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1291/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1292/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1293/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01293: ReduceLROnPlateau reducing learning rate to 1.4694015696910032e-41.
Epoch 1294/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1295/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1296/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1297/2000
191/191 [==============================] - ETA: 0s - loss: 0.0313 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1298/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1299/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1300/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1301/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1302/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1303/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01303: ReduceLROnPlateau reducing learning rate to 7.347007848455016e-42.
Epoch 1304/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1305/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1306/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1307/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1308/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1309/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1310/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1311/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1312/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1313/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01313: ReduceLROnPlateau reducing learning rate to 3.673503924227508e-42.
Epoch 1314/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1315/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1316/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1317/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1318/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1319/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1320/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1321/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1322/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1323/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01323: ReduceLROnPlateau reducing learning rate to 1.8371022867298352e-42.
Epoch 1324/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1325/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1326/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1327/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1328/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1329/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1330/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1331/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1332/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1333/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01333: ReduceLROnPlateau reducing learning rate to 9.185511433649176e-43.
Epoch 1334/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1335/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1336/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1337/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1338/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1339/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1340/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1341/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1342/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1343/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01343: ReduceLROnPlateau reducing learning rate to 4.5962589629854e-43.
Epoch 1344/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1345/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1346/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1347/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1348/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1349/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1350/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1351/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1352/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1353/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01353: ReduceLROnPlateau reducing learning rate to 2.2981294814927e-43.
Epoch 1354/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1355/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1356/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1357/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1358/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1359/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1360/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1361/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1362/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1363/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01363: ReduceLROnPlateau reducing learning rate to 1.14906474074635e-43.
Epoch 1364/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1365/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1366/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1367/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1368/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1369/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1370/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1371/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1372/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1373/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01373: ReduceLROnPlateau reducing learning rate to 5.74532370373175e-44.
Epoch 1374/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1375/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1376/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1377/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1378/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1379/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1380/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1381/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1382/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1383/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01383: ReduceLROnPlateau reducing learning rate to 2.872661851865875e-44.
Epoch 1384/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1385/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1386/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1387/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1388/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1389/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1390/2000
191/191 [==============================] - ETA: 0s - loss: 0.0479 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1391/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1392/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1393/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01393: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-44.
Epoch 1394/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1395/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1396/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1397/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1398/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1399/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1400/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1401/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1402/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1403/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01403: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-45.
Epoch 1404/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1405/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1406/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1407/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1408/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1409/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1410/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1411/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1412/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1413/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01413: ReduceLROnPlateau reducing learning rate to 3.5032461608120427e-45.
Epoch 1414/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1415/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1416/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1417/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1418/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1419/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1420/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1421/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1422/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1423/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01423: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1424/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1425/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1426/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1427/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1428/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1429/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1430/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1431/2000
191/191 [==============================] - ETA: 0s - loss: 0.0420 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1432/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1433/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01433: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1434/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1435/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1436/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1437/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1438/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1439/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1440/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1441/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1442/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1443/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1444/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1445/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1446/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1447/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1448/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1449/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1450/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1451/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1452/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1453/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1454/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1455/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1456/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1457/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1458/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1459/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1460/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1461/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1462/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1463/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1464/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1465/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1466/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1467/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1468/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1469/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1470/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1471/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1472/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1473/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1474/2000
191/191 [==============================] - ETA: 0s - loss: 0.0491 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1475/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1476/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1477/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1478/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1479/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1480/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1481/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1482/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1483/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1484/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1485/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1486/2000
191/191 [==============================] - 0s 147us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1487/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1488/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1489/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1490/2000
191/191 [==============================] - 0s 114us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1491/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1492/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1493/2000
191/191 [==============================] - 0s 109us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1494/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1495/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1496/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1497/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1498/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1499/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1500/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1501/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1502/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1503/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1504/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1505/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1506/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1507/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1508/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1509/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1510/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1511/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1512/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1513/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1514/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1515/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1516/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1517/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1518/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1519/2000
191/191 [==============================] - 0s 95us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1520/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1521/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1522/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1523/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1524/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1525/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1526/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1527/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1528/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1529/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1530/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1531/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1532/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1533/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1534/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1535/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1536/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1537/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1538/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1539/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1540/2000
191/191 [==============================] - ETA: 0s - loss: 0.0384 - accuracy: 1.00 - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1541/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1542/2000
191/191 [==============================] - 0s 188us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1543/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1544/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1545/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1546/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1547/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1548/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1549/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1550/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1551/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1552/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1553/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1554/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1555/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1556/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1557/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1558/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1559/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1560/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1561/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1562/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1563/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1564/2000
191/191 [==============================] - ETA: 0s - loss: 0.0228 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1565/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1566/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1567/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1568/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1569/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1570/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1571/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1572/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1573/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1574/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1575/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1576/2000
191/191 [==============================] - 0s 111us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1577/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1578/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1579/2000
191/191 [==============================] - 0s 102us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1580/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1581/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1582/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1583/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1584/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1585/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1586/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1587/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1588/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1589/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1590/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1591/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1592/2000
191/191 [==============================] - 0s 101us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1593/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1594/2000
191/191 [==============================] - 0s 112us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1595/2000
191/191 [==============================] - 0s 106us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1596/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1597/2000
191/191 [==============================] - ETA: 0s - loss: 0.0252 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1598/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1599/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1600/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1601/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1602/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1603/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1604/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1605/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1606/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1607/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1608/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1609/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1610/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1611/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1612/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1613/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1614/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1615/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1616/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1617/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1618/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1619/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1620/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1621/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1622/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1623/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1624/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1625/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1626/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1627/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1628/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1629/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1630/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1631/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1632/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1633/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1634/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1635/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1636/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1637/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1638/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1639/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1640/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1641/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1642/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1643/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1644/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1645/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1646/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1647/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1648/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1649/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1650/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1651/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1652/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1653/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1654/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1655/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1656/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1657/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1658/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1659/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1660/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1661/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1662/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1663/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1664/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1665/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1666/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1667/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1668/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1669/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1670/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1671/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1672/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1673/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1674/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1675/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1676/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1677/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1678/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1679/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1680/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1681/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1682/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1683/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1684/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1685/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1686/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1687/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1688/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1689/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1690/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1691/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1692/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1693/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1694/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1695/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1696/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1697/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1698/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1699/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1700/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1701/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1702/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1703/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1704/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1705/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1706/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1707/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1708/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1709/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1710/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1711/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1712/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1713/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1714/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1715/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1716/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1717/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1718/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1719/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1720/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1721/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1722/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1723/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1724/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1725/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1726/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1727/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1728/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1729/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1730/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1731/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1732/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1733/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1734/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1735/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1736/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1737/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1738/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1739/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1740/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1741/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1742/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1743/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1744/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1745/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1746/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1747/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1748/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1749/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1750/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1751/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1752/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1753/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1754/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1755/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1756/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1757/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1758/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1759/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1760/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1761/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1762/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1763/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1764/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1765/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1766/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1767/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1768/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1769/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1770/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1771/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1772/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1773/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1774/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1775/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1776/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1777/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1778/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1779/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1780/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1781/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1782/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1783/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1784/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1785/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1786/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1787/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1788/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1789/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1790/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1791/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1792/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1793/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1794/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1795/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1796/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1797/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1798/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1799/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1800/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1801/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1802/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1803/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1804/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1805/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1806/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1807/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1808/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1809/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1810/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1811/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1812/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1813/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1814/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1815/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1816/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1817/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1818/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1819/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1820/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1821/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1822/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1823/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1824/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1825/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1826/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1827/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1828/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1829/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1830/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1831/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1832/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1833/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1834/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1835/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1836/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1837/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1838/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1839/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1840/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1841/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1842/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1843/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1844/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1845/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1846/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1847/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1848/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1849/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1850/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1851/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1852/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1853/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1854/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1855/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1856/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1857/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1858/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1859/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1860/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1861/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1862/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1863/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1864/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1865/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1866/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1867/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1868/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1869/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1870/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1871/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1872/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1873/2000
191/191 [==============================] - ETA: 0s - loss: 0.0489 - accuracy: 1.00 - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1874/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1875/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1876/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1877/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1878/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1879/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1880/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1881/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1882/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1883/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1884/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1885/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1886/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1887/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1888/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1889/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1890/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1891/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1892/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1893/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1894/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1895/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1896/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1897/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1898/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1899/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1900/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1901/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1902/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1903/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1904/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1905/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1906/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1907/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1908/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1909/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1910/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1911/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1912/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1913/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1914/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1915/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1916/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1917/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1918/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1919/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1920/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1921/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1922/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1923/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1924/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1925/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1926/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1927/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1928/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1929/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1930/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1931/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1932/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1933/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1934/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1935/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1936/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1937/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1938/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1939/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1940/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1941/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1942/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1943/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1944/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1945/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1946/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1947/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1948/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1949/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1950/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1951/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1952/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1953/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1954/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1955/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1956/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1957/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1958/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1959/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1960/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1961/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1962/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1963/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1964/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1965/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1966/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1967/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1968/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1969/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1970/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1971/2000
191/191 [==============================] - ETA: 0s - loss: 0.0400 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1972/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1973/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1974/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1975/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1976/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1977/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1978/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1979/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1980/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1981/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1982/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1983/2000
191/191 [==============================] - ETA: 0s - loss: 0.0319 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1984/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1985/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1986/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1987/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1988/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1989/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1990/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1991/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1992/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1993/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1994/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1995/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1996/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1997/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1998/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1999/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 2000/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
In [57]:
# Plot the training history: accuracy and loss curves, train vs. validation.
# (Removed leftover debug `print(epochs)`, which dumped `range(0, 2000)` into
# the output; added x-axis labels so the figures stand alone.)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()
range(0, 2000)
In [58]:
# Evaluate the trained network on the held-out test set.
evaluation_metrics = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation_metrics
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
64/64 [==============================] - 0s 47us/step
test loss: 0.8193266093730927, test accuracy: 0.765625
In [64]:
# Score the probabilistic predictions with ROC AUC (threshold-free metric).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.7282608695652174
In [65]:
# Binarize the predicted probabilities at 0.5, then compute Cohen's kappa
# (chance-corrected agreement between predictions and ground truth).
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.366754617414248

KMeans clustering of the MFCC features

In [66]:
X
Out[66]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782
... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307

255 rows × 13 columns

In [67]:
# Elbow method: record the within-cluster sum of squares (KMeans inertia_)
# for k = 1..14; KMeans.fit returns the fitted estimator, so we can chain.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[67]:
[3315.0,
 2972.7888695817974,
 2748.18187155972,
 2544.9420084212106,
 2413.687059384553,
 2278.037996783226,
 2213.3487507256823,
 2123.4282707474663,
 2067.8299633414163,
 1977.777252698108,
 1956.5229777214513,
 1880.0296166971755,
 1815.5096049846275,
 1785.9955747862728]
In [68]:
# Plot the elbow curve: inertia vs. number of clusters.
cluster_counts = range(1, 15)
plt.figure(figsize=(12, 12))
plt.plot(cluster_counts, WSSs)
Out[68]:
[<matplotlib.lines.Line2D at 0x1e82ae84f98>]

From the elbow curve above, we choose K = 6.

In [69]:
# Fit K-means on the MFCC features with the elbow-selected k = 6
# (10 centroid restarts, fixed seed for reproducibility).
kmeans_mfcc = KMeans(random_state=0, n_clusters=6, n_init=10)
kmeans_mfcc.fit(X)
Out[69]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=6, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [70]:
# Cluster assignment (0-5) for each row of X, in row order.
kmeans_mfcc.labels_
Out[70]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [71]:
# For KMeans, predict() on the data the model was fitted on returns
# exactly labels_ (documented sklearn behavior), so reuse the stored
# labels instead of recomputing nearest-centroid assignments.
clusters_mfcc = kmeans_mfcc.labels_
clusters_mfcc
Out[71]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [72]:
# Attach the cluster id and the original target to X so the next cell can
# cross-tabulate clusters against the 'chosen' label.
# list(y) deliberately drops y's index: values are assigned positionally.
X['Cluster'] = clusters_mfcc
X['chosen'] = list(y)
In [73]:
X
Out[73]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600 4 0
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678 2 0
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094 2 0
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898 2 0
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348 0 0
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512 1 0
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271 1 0
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697 0 0
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074 1 0
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167 4 0
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114 4 0
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874 4 0
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366 1 0
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900 4 0
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454 4 0
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188 3 0
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204 4 0
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885 4 0
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785 4 0
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391 4 0
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813 4 0
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790 0 0
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849 0 0
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126 4 0
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925 4 0
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850 4 0
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969 4 0
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105 3 0
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840 4 0
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782 3 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440 1 1
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084 0 1
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654 1 1
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530 2 1
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469 2 1
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971 3 1
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456 0 1
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198 4 1
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858 2 1
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730 2 1
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517 4 1
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789 0 1
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825 2 1
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166 2 1
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763 2 1
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182 1 1
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561 3 1
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069 4 1
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656 4 1
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744 2 1
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849 2 1
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814 2 1
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479 4 1
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237 0 1
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216 0 1
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556 2 1
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679 2 1
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201 2 1
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004 2 1
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307 2 1

255 rows × 15 columns

In [74]:
# Count tracks per (chosen, Cluster) pair, pivot to a clusters-by-chosen
# table, and draw a stacked bar chart to see how each cluster splits
# between chosen (1) and not chosen (0).
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[74]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82aec1940>
In [75]:
# Render the current company's name as a markdown section header
# (companies[2] is defined earlier in the notebook).
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[2]))

Gramma

ANN

In [76]:
# Feature matrix for company index 2 — presumably the standardized MFCC
# frame (name suggests it; confirm upstream) — minus the previously added
# 'Cluster' column so only the 13 MFCC features remain.
X = df_n_ps_std_mfcc[2].drop(columns='Cluster')
In [77]:
# Binary target for the same company: whether each track was chosen (0/1).
y = df_n_ps[2]['chosen']
In [78]:
# Hold out a test set. random_state pins the split so the notebook is
# reproducible under Restart & Run All; stratify keeps the 0/1 balance of
# 'chosen' the same in both partitions (default test_size=0.25 unchanged).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, stratify=y)
In [79]:
# Sanity check on the split: 231 training rows x 13 MFCC features.
X_train.shape
Out[79]:
(231, 13)
In [80]:
# Base estimator for the grid search below. MLP weight initialization and
# batch shuffling are stochastic, so fix random_state to make each CV fit
# reproducible (the grid only varies the parameters listed in parametros).
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30), random_state=0)
In [81]:
# Hyperparameter search space for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']  # hidden-layer activations
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Architectures: single layers of 10/20/30 units, plus two- and
# three-layer stacks (including a tapered 30-20-10 pyramid).
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),
    (10, 10), (20, 20), (30, 30), (20, 10),
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),
]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005,
                          0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [82]:
import time
start = time.time() # wall-clock start (seconds since the Unix epoch), used to report total tuning time below

np.random.seed(1234)  # seed the global RNG used by any estimator without its own random_state
# Search space; batch_size is left commented out to keep the grid tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score with both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid=True is deprecated and was removed in scikit-learn 0.24 — confirm the pinned version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [83]:
# Run the exhaustive search: 3*11*11*11 = 3993 candidates x 5 CV folds.
grid.fit(X_train, y_train)

# Report best hyperparameters with their CV accuracy and kappa
# (user-facing strings are in Spanish and kept verbatim).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # wall-clock end, after the model tuning finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.001, 'max_iter': 1000}, que permiten obtener un Accuracy de 84.42% y un Kappa del 45.84
Tiempo total: 27.81 minutos
In [84]:
# Pull the winning hyperparameters out of the grid search so the same
# architecture can be rebuilt in Keras below.
n0=X_train.shape[1]  # input dimensionality (13 MFCC features)
### hidden_layer_sizes
# Copy the best hidden-layer-width tuple into a list (replaces the
# original element-by-element index loop) and append the single output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [85]:
# Keras functional-API entry point: one n0-dimensional feature vector per sample.
input_tensor = Input(shape = (n0,))
In [86]:
# Build the hidden stack with the activation the grid search actually
# selected (the output of In[83] shows 'relu' won, but the original code
# hardcoded 'tanh', silently diverging from the tuned model). sklearn
# calls the sigmoid activation 'logistic' while Keras calls it 'sigmoid';
# 'relu' and 'tanh' share the same name in both libraries.
hidden_activation = {'logistic': 'sigmoid'}.get(
    grid.best_params_['activation'], grid.best_params_['activation'])

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=hidden_activation)(hidden_outputs[i]))

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [87]:
# Assemble the functional model, and snapshot the freshly-initialized
# weights so training can later restart from this exact starting point.
model = Model(inputs=[input_tensor], outputs=[classification_output])
weights = model.get_weights()
In [88]:
# Layer-by-layer architecture and parameter counts (13 -> 30 -> 1, 451 params).
model.summary()
Model: "model_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_8 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_9 (Dense)              (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
In [89]:
# Reset to the saved initial weights, then train with Adam at the
# grid-searched learning rate. ReduceLROnPlateau halves the learning rate
# whenever validation accuracy fails to improve by 0.01 for 10 epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])

lr_schedule = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1)

history = model.fit(
    X_train, y_train,
    epochs=epochs,
    validation_data=(X_test, y_test),
    batch_size=32,
    callbacks=[lr_schedule],
)
Train on 231 samples, validate on 78 samples
Epoch 1/1000
231/231 [==============================] - 0s 1ms/step - loss: 0.8119 - accuracy: 0.4589 - val_loss: 0.7457 - val_accuracy: 0.5385
Epoch 2/1000
231/231 [==============================] - 0s 126us/step - loss: 0.7766 - accuracy: 0.4935 - val_loss: 0.7248 - val_accuracy: 0.5513
Epoch 3/1000
231/231 [==============================] - 0s 61us/step - loss: 0.7467 - accuracy: 0.5411 - val_loss: 0.7062 - val_accuracy: 0.5385
Epoch 4/1000
231/231 [==============================] - 0s 52us/step - loss: 0.7166 - accuracy: 0.5931 - val_loss: 0.6883 - val_accuracy: 0.5513
Epoch 5/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6911 - accuracy: 0.6147 - val_loss: 0.6739 - val_accuracy: 0.5641
Epoch 6/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6673 - accuracy: 0.6234 - val_loss: 0.6607 - val_accuracy: 0.5897
Epoch 7/1000
231/231 [==============================] - 0s 78us/step - loss: 0.6460 - accuracy: 0.6364 - val_loss: 0.6480 - val_accuracy: 0.5897
Epoch 8/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6258 - accuracy: 0.6623 - val_loss: 0.6371 - val_accuracy: 0.6154
Epoch 9/1000
231/231 [==============================] - 0s 74us/step - loss: 0.6079 - accuracy: 0.6883 - val_loss: 0.6269 - val_accuracy: 0.6667
Epoch 10/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5915 - accuracy: 0.7229 - val_loss: 0.6189 - val_accuracy: 0.6923
Epoch 11/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5768 - accuracy: 0.7403 - val_loss: 0.6096 - val_accuracy: 0.7051
Epoch 12/1000
231/231 [==============================] - 0s 56us/step - loss: 0.5636 - accuracy: 0.7532 - val_loss: 0.6025 - val_accuracy: 0.7308
Epoch 13/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5516 - accuracy: 0.7662 - val_loss: 0.5950 - val_accuracy: 0.7564
Epoch 14/1000
231/231 [==============================] - 0s 56us/step - loss: 0.5405 - accuracy: 0.7619 - val_loss: 0.5887 - val_accuracy: 0.7692
Epoch 15/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5306 - accuracy: 0.7706 - val_loss: 0.5840 - val_accuracy: 0.7564
Epoch 16/1000
231/231 [==============================] - 0s 91us/step - loss: 0.5200 - accuracy: 0.7835 - val_loss: 0.5799 - val_accuracy: 0.7564
Epoch 17/1000
231/231 [==============================] - 0s 65us/step - loss: 0.5108 - accuracy: 0.7879 - val_loss: 0.5743 - val_accuracy: 0.7436
Epoch 18/1000
231/231 [==============================] - 0s 91us/step - loss: 0.5024 - accuracy: 0.7965 - val_loss: 0.5700 - val_accuracy: 0.7179
Epoch 19/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4944 - accuracy: 0.8139 - val_loss: 0.5660 - val_accuracy: 0.7051
Epoch 20/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4864 - accuracy: 0.8139 - val_loss: 0.5606 - val_accuracy: 0.7179
Epoch 21/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4797 - accuracy: 0.8139 - val_loss: 0.5552 - val_accuracy: 0.7179
Epoch 22/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4734 - accuracy: 0.8182 - val_loss: 0.5507 - val_accuracy: 0.7308
Epoch 23/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4674 - accuracy: 0.8225 - val_loss: 0.5486 - val_accuracy: 0.7308
Epoch 24/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4615 - accuracy: 0.8225 - val_loss: 0.5479 - val_accuracy: 0.7436

Epoch 00024: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 25/1000
231/231 [==============================] - ETA: 0s - loss: 0.4421 - accuracy: 0.84 - 0s 56us/step - loss: 0.4574 - accuracy: 0.8182 - val_loss: 0.5472 - val_accuracy: 0.7436
Epoch 26/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4552 - accuracy: 0.8182 - val_loss: 0.5465 - val_accuracy: 0.7564
Epoch 27/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4527 - accuracy: 0.8139 - val_loss: 0.5450 - val_accuracy: 0.7564
Epoch 28/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4502 - accuracy: 0.8139 - val_loss: 0.5441 - val_accuracy: 0.7564
Epoch 29/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4479 - accuracy: 0.8182 - val_loss: 0.5429 - val_accuracy: 0.7564
Epoch 30/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4459 - accuracy: 0.8182 - val_loss: 0.5424 - val_accuracy: 0.7436
Epoch 31/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4439 - accuracy: 0.8182 - val_loss: 0.5413 - val_accuracy: 0.7436
Epoch 32/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4419 - accuracy: 0.8182 - val_loss: 0.5407 - val_accuracy: 0.7436
Epoch 33/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4402 - accuracy: 0.8139 - val_loss: 0.5400 - val_accuracy: 0.7436
Epoch 34/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4385 - accuracy: 0.8139 - val_loss: 0.5398 - val_accuracy: 0.7436

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 35/1000
231/231 [==============================] - 0s 52us/step - loss: 0.4370 - accuracy: 0.8139 - val_loss: 0.5392 - val_accuracy: 0.7436
Epoch 36/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4361 - accuracy: 0.8182 - val_loss: 0.5391 - val_accuracy: 0.7436
Epoch 37/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4353 - accuracy: 0.8182 - val_loss: 0.5390 - val_accuracy: 0.7436
Epoch 38/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4346 - accuracy: 0.8182 - val_loss: 0.5384 - val_accuracy: 0.7436
Epoch 39/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4337 - accuracy: 0.8182 - val_loss: 0.5387 - val_accuracy: 0.7436
Epoch 40/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4329 - accuracy: 0.8182 - val_loss: 0.5382 - val_accuracy: 0.7436
Epoch 41/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4320 - accuracy: 0.8182 - val_loss: 0.5377 - val_accuracy: 0.7436
Epoch 42/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4311 - accuracy: 0.8139 - val_loss: 0.5370 - val_accuracy: 0.7436
Epoch 43/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4304 - accuracy: 0.8095 - val_loss: 0.5363 - val_accuracy: 0.7436
Epoch 44/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4298 - accuracy: 0.8095 - val_loss: 0.5358 - val_accuracy: 0.7436

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 45/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4291 - accuracy: 0.8139 - val_loss: 0.5355 - val_accuracy: 0.7436
Epoch 46/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4286 - accuracy: 0.8139 - val_loss: 0.5353 - val_accuracy: 0.7564
Epoch 47/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4282 - accuracy: 0.8182 - val_loss: 0.5353 - val_accuracy: 0.7436
Epoch 48/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4279 - accuracy: 0.8182 - val_loss: 0.5352 - val_accuracy: 0.7564
Epoch 49/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4275 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 50/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4271 - accuracy: 0.8182 - val_loss: 0.5351 - val_accuracy: 0.7564
Epoch 51/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4267 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 52/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4265 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 53/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4261 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 54/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4258 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436

Epoch 00054: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 55/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4255 - accuracy: 0.8182 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 56/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4253 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 57/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4251 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 58/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4250 - accuracy: 0.8182 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 59/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4248 - accuracy: 0.8182 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 60/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4246 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 61/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4244 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 62/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4243 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 63/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4241 - accuracy: 0.8139 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 64/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4240 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436

Epoch 00064: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 65/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4238 - accuracy: 0.8182 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 66/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4237 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 67/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4236 - accuracy: 0.8139 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 68/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4235 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 69/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4235 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 70/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4234 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 71/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4233 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 72/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4232 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 73/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4232 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 74/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4231 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436

Epoch 00074: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 75/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4230 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 76/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4230 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 77/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4229 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 78/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4229 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 79/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4228 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 80/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4228 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 81/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 82/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 83/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 84/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4226 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436

Epoch 00084: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 85/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4226 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 86/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 87/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 88/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 89/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 90/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 91/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 92/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 93/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 94/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436

Epoch 00094: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 95/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 96/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 97/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 98/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 99/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 100/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 101/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 102/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 103/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 104/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436

Epoch 00104: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 105/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 106/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 107/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 108/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 109/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 110/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 111/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 112/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 113/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 114/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00114: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 115/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 116/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 117/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 118/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 119/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 120/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 121/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 122/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 123/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 124/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00124: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 125/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 126/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 127/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 128/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 129/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 130/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 131/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 132/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 133/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 134/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00134: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 135/1000
231/231 [==============================] - 0s 134us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 136/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 137/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 138/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 139/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 140/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 141/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 142/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 143/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 144/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00144: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 145/1000
231/231 [==============================] - 0s 1ms/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 146/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 147/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 148/1000
231/231 [==============================] - 0s 114us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 149/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 150/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 151/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 152/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 153/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 154/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00154: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 155/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 156/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 157/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 158/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 159/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 160/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 161/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 162/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 163/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 164/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00164: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 165/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 166/1000
231/231 [==============================] - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 167/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 168/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 169/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 170/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 171/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 172/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 173/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 174/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00174: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 175/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 176/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 177/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 178/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 179/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 180/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 181/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 182/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 183/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 184/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00184: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 185/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 186/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 187/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 188/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 189/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 190/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 191/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 192/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 193/1000
231/231 [==============================] - ETA: 0s - loss: 0.3107 - accuracy: 0.90 - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 194/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00194: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 195/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 196/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 197/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 198/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 199/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 200/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 201/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 202/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 203/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 204/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00204: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 205/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 206/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 207/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 208/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 209/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 210/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 211/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 212/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 213/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 214/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00214: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 215/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 216/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 217/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 218/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 219/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 220/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 221/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 222/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 223/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 224/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00224: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 225/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 226/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 227/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 228/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 229/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 230/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 231/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 232/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 233/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 234/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00234: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 235/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 236/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 237/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 238/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 239/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 240/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 241/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 242/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 243/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 244/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00244: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 245/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 246/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 247/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 248/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 249/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 250/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 251/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 252/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 253/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 254/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00254: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 255/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 256/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 257/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 258/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 259/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 260/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 261/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 262/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 263/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 264/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00264: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 265/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 266/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 267/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 268/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 269/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 270/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 271/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 272/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 273/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 274/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00274: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 275/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 276/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 277/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 278/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 279/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 280/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 281/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 282/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 283/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 284/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00284: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 285/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 286/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 287/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 288/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 289/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 290/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 291/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 292/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 293/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 294/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00294: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 295/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 296/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 297/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 298/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 299/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 300/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 301/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 302/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 303/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 304/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00304: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 305/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 306/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 307/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 308/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 309/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 310/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 311/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 312/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 313/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 314/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00314: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 315/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 316/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 317/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 318/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 319/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 320/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 321/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 322/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 323/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 324/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00324: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 325/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 326/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 327/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 328/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 329/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 330/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 331/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 332/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 333/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 334/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00334: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 335/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 336/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 337/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 338/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 339/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 340/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 341/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 342/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 343/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 344/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00344: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 345/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 346/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 347/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 348/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 349/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 350/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 351/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 352/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 353/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 354/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00354: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 355/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 356/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 357/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 358/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 359/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 360/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 361/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 362/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 363/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 364/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00364: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 365/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 366/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 367/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 368/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 369/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 370/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 371/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 372/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 373/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 374/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00374: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 375/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 376/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 377/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 378/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 379/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 380/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 381/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 382/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 383/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 384/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00384: ReduceLROnPlateau reducing learning rate to 7.275957959772868e-15.
Epoch 385/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 386/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 387/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 388/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 389/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 390/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 391/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 392/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 393/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 394/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00394: ReduceLROnPlateau reducing learning rate to 3.637978979886434e-15.
Epoch 395/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 396/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 397/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 398/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 399/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 400/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 401/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 402/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 403/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 404/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00404: ReduceLROnPlateau reducing learning rate to 1.818989489943217e-15.
Epoch 405/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 406/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 407/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 408/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 409/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 410/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 411/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 412/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 413/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 414/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00414: ReduceLROnPlateau reducing learning rate to 9.094947449716085e-16.
Epoch 415/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 416/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 417/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 418/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 419/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 420/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 421/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 422/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 423/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 424/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00424: ReduceLROnPlateau reducing learning rate to 4.547473724858043e-16.
Epoch 425/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 426/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 427/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 428/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 429/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 430/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 431/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 432/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 433/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 434/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00434: ReduceLROnPlateau reducing learning rate to 2.2737368624290214e-16.
Epoch 435/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 436/1000
231/231 [==============================] - ETA: 0s - loss: 0.6507 - accuracy: 0.62 - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 437/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 438/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 439/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 440/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 441/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 442/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 443/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 444/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00444: ReduceLROnPlateau reducing learning rate to 1.1368684312145107e-16.
Epoch 445/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 446/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 447/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 448/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 449/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 450/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 451/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 452/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 453/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 454/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00454: ReduceLROnPlateau reducing learning rate to 5.684342156072553e-17.
Epoch 455/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 456/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 457/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 458/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 459/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 460/1000
231/231 [==============================] - 0s 203us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 461/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 462/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 463/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 464/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00464: ReduceLROnPlateau reducing learning rate to 2.842171078036277e-17.
Epoch 465/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 466/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 467/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 468/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 469/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 470/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 471/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 472/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 473/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 474/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00474: ReduceLROnPlateau reducing learning rate to 1.4210855390181384e-17.
Epoch 475/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 476/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 477/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 478/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 479/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 480/1000
231/231 [==============================] - ETA: 0s - loss: 0.4785 - accuracy: 0.78 - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 481/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 482/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 483/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 484/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00484: ReduceLROnPlateau reducing learning rate to 7.105427695090692e-18.
Epoch 485/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 486/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 487/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 488/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 489/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 490/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 491/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 492/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 493/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 494/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00494: ReduceLROnPlateau reducing learning rate to 3.552713847545346e-18.
Epoch 495/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 496/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 497/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 498/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 499/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 500/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 501/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 502/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 503/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 504/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00504: ReduceLROnPlateau reducing learning rate to 1.776356923772673e-18.
Epoch 505/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 506/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 507/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 508/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 509/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 510/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 511/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 512/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 513/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 514/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00514: ReduceLROnPlateau reducing learning rate to 8.881784618863365e-19.
Epoch 515/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 516/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 517/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 518/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 519/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 520/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 521/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 522/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 523/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 524/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00524: ReduceLROnPlateau reducing learning rate to 4.440892309431682e-19.
Epoch 525/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 526/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 527/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 528/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 529/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 530/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 531/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 532/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 533/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 534/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00534: ReduceLROnPlateau reducing learning rate to 2.220446154715841e-19.
Epoch 535/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 536/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 537/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 538/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 539/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 540/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 541/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 542/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 543/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 544/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00544: ReduceLROnPlateau reducing learning rate to 1.1102230773579206e-19.
Epoch 545/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 546/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 547/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 548/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 549/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 550/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 551/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 552/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 553/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 554/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00554: ReduceLROnPlateau reducing learning rate to 5.551115386789603e-20.
Epoch 555/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 556/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 557/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 558/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 559/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 560/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 561/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 562/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 563/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 564/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00564: ReduceLROnPlateau reducing learning rate to 2.7755576933948015e-20.
Epoch 565/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 566/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 567/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 568/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 569/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 570/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 571/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 572/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 573/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 574/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00574: ReduceLROnPlateau reducing learning rate to 1.3877788466974007e-20.
Epoch 575/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 576/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 577/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 578/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 579/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 580/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 581/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 582/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 583/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 584/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00584: ReduceLROnPlateau reducing learning rate to 6.938894233487004e-21.
Epoch 585/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 586/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 587/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 588/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 589/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 590/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 591/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 592/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 593/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 594/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00594: ReduceLROnPlateau reducing learning rate to 3.469447116743502e-21.
Epoch 595/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 596/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 597/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 598/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 599/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 600/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 601/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 602/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 603/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 604/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00604: ReduceLROnPlateau reducing learning rate to 1.734723558371751e-21.
Epoch 605/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 606/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 607/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 608/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 609/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 610/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 611/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 612/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 613/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 614/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00614: ReduceLROnPlateau reducing learning rate to 8.673617791858755e-22.
Epoch 615/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 616/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 617/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 618/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 619/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 620/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 621/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 622/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 623/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 624/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00624: ReduceLROnPlateau reducing learning rate to 4.336808895929377e-22.
Epoch 625/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 626/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 627/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 628/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 629/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 630/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 631/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 632/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 633/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 634/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00634: ReduceLROnPlateau reducing learning rate to 2.1684044479646887e-22.
Epoch 635/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 636/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 637/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 638/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 639/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 640/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 641/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 642/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 643/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 644/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00644: ReduceLROnPlateau reducing learning rate to 1.0842022239823443e-22.
Epoch 645/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 646/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 647/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 648/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 649/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 650/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 651/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 652/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 653/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 654/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00654: ReduceLROnPlateau reducing learning rate to 5.421011119911722e-23.
Epoch 655/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 656/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 657/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 658/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 659/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 660/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 661/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 662/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 663/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 664/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00664: ReduceLROnPlateau reducing learning rate to 2.710505559955861e-23.
Epoch 665/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 666/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 667/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 668/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 669/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 670/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 671/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 672/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 673/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 674/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00674: ReduceLROnPlateau reducing learning rate to 1.3552527799779304e-23.
Epoch 675/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 676/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 677/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 678/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 679/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 680/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 681/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 682/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 683/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 684/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00684: ReduceLROnPlateau reducing learning rate to 6.776263899889652e-24.
Epoch 685/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 686/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 687/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 688/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 689/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 690/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 691/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 692/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 693/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 694/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00694: ReduceLROnPlateau reducing learning rate to 3.388131949944826e-24.
Epoch 695/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 696/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 697/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 698/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 699/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 700/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 701/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 702/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 703/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 704/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00704: ReduceLROnPlateau reducing learning rate to 1.694065974972413e-24.
Epoch 705/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 706/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 707/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 708/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 709/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 710/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 711/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 712/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 713/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 714/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00714: ReduceLROnPlateau reducing learning rate to 8.470329874862065e-25.
Epoch 715/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 716/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 717/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 718/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 719/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 720/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 721/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 722/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 723/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 724/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00724: ReduceLROnPlateau reducing learning rate to 4.2351649374310325e-25.
Epoch 725/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 726/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 727/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 728/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 729/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 730/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 731/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 732/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 733/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 734/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00734: ReduceLROnPlateau reducing learning rate to 2.1175824687155163e-25.
Epoch 735/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 736/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 737/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 738/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 739/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 740/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 741/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 742/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 743/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 744/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00744: ReduceLROnPlateau reducing learning rate to 1.0587912343577581e-25.
Epoch 745/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 746/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 747/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 748/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 749/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 750/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 751/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 752/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 753/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 754/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00754: ReduceLROnPlateau reducing learning rate to 5.293956171788791e-26.
Epoch 755/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 756/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 757/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 758/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 759/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 760/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 761/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 762/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 763/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 764/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00764: ReduceLROnPlateau reducing learning rate to 2.6469780858943953e-26.
Epoch 765/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 766/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 767/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 768/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 769/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 770/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 771/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 772/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 773/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 774/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00774: ReduceLROnPlateau reducing learning rate to 1.3234890429471977e-26.
Epoch 775/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 776/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 777/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 778/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 779/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 780/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 781/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 782/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 783/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 784/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00784: ReduceLROnPlateau reducing learning rate to 6.617445214735988e-27.
Epoch 785/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 786/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 787/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 788/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 789/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 790/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 791/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 792/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 793/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 794/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00794: ReduceLROnPlateau reducing learning rate to 3.308722607367994e-27.
Epoch 795/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 796/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 797/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 798/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 799/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 800/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 801/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 802/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 803/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 804/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00804: ReduceLROnPlateau reducing learning rate to 1.654361303683997e-27.
Epoch 805/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 806/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 807/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 808/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 809/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 810/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 811/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 812/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 813/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 814/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00814: ReduceLROnPlateau reducing learning rate to 8.271806518419985e-28.
Epoch 815/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 816/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 817/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 818/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 819/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 820/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 821/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 822/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 823/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 824/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00824: ReduceLROnPlateau reducing learning rate to 4.135903259209993e-28.
Epoch 825/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 826/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 827/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 828/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 829/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 830/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 831/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 832/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 833/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 834/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00834: ReduceLROnPlateau reducing learning rate to 2.0679516296049964e-28.
Epoch 835/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 836/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 837/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 838/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 839/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 840/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 841/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 842/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 843/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 844/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00844: ReduceLROnPlateau reducing learning rate to 1.0339758148024982e-28.
Epoch 845/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 846/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 847/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 848/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 849/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 850/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 851/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 852/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 853/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 854/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00854: ReduceLROnPlateau reducing learning rate to 5.169879074012491e-29.
Epoch 855/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 856/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 857/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 858/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 859/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 860/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 861/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 862/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 863/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 864/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00864: ReduceLROnPlateau reducing learning rate to 2.5849395370062454e-29.
Epoch 865/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 866/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 867/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 868/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 869/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 870/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 871/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 872/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 873/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 874/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00874: ReduceLROnPlateau reducing learning rate to 1.2924697685031227e-29.
Epoch 875/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 876/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 877/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 878/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 879/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 880/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 881/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 882/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 883/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 884/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00884: ReduceLROnPlateau reducing learning rate to 6.462348842515614e-30.
Epoch 885/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 886/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 887/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 888/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 889/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 890/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 891/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 892/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 893/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 894/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00894: ReduceLROnPlateau reducing learning rate to 3.231174421257807e-30.
Epoch 895/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 896/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 897/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 898/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 899/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 900/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 901/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 902/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 903/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 904/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00904: ReduceLROnPlateau reducing learning rate to 1.6155872106289034e-30.
Epoch 905/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 906/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 907/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 908/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 909/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 910/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 911/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 912/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 913/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 914/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00914: ReduceLROnPlateau reducing learning rate to 8.077936053144517e-31.
Epoch 915/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 916/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 917/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 918/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 919/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 920/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 921/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 922/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 923/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 924/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00924: ReduceLROnPlateau reducing learning rate to 4.0389680265722585e-31.
Epoch 925/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 926/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 927/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 928/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 929/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 930/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 931/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 932/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 933/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 934/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00934: ReduceLROnPlateau reducing learning rate to 2.0194840132861292e-31.
Epoch 935/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 936/1000
231/231 [==============================] - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 937/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 938/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 939/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 940/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 941/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 942/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 943/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 944/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00944: ReduceLROnPlateau reducing learning rate to 1.0097420066430646e-31.
Epoch 945/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 946/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 947/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 948/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 949/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 950/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 951/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 952/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 953/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 954/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00954: ReduceLROnPlateau reducing learning rate to 5.048710033215323e-32.
Epoch 955/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 956/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 957/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 958/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 959/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 960/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 961/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 962/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 963/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 964/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00964: ReduceLROnPlateau reducing learning rate to 2.5243550166076616e-32.
Epoch 965/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 966/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 967/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 968/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 969/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 970/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 971/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 972/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 973/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 974/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00974: ReduceLROnPlateau reducing learning rate to 1.2621775083038308e-32.
Epoch 975/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 976/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 977/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 978/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 979/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 980/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 981/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 982/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 983/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 984/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00984: ReduceLROnPlateau reducing learning rate to 6.310887541519154e-33.
Epoch 985/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 986/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 987/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 988/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 989/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 990/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 991/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 992/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 993/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 994/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00994: ReduceLROnPlateau reducing learning rate to 3.155443770759577e-33.
Epoch 995/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 996/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 997/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 998/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 999/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 1000/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
In [57]:
# Plot the Keras training history: accuracy and loss curves for the
# training split vs. the validation split.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-axis point per recorded epoch.
# (Removed a leftover debug `print(epochs)` that cluttered the output.)
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 2000)
In [90]:
# Evaluate the trained network on the held-out test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
78/78 [==============================] - 0s 77us/step
test loss: 0.5342327150014731, test accuracy: 0.7435897588729858
In [91]:
# Predicted probabilities on the test set; ROC AUC is threshold-independent,
# so it is computed before binarising the predictions.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.655664585191793
In [92]:
# Binarise the predicted probabilities at the conventional 0.5 threshold,
# then measure chance-corrected agreement with the true labels.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.07253269916765748

KMeans

In [93]:
X
Out[93]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.784459 0.109342 0.681608 1.151904 -0.707724 0.736365 0.241404 -0.461605 1.084621 0.123497 -0.150398 1.784532 0.824544
1 0.213444 0.453851 0.215705 0.987439 -1.851748 -0.001814 -0.218203 0.548263 -0.521851 1.253720 0.882518 -0.892913 0.218184
2 1.478029 0.664210 0.484232 0.450284 -0.427587 -0.448079 -0.195272 -0.610679 -0.675633 0.759098 -0.107303 -0.440790 -1.398093
3 -0.846386 -0.742706 -0.734786 -0.199585 -0.328948 -0.230911 0.620910 0.911236 1.274407 0.150882 -0.603865 -0.919849 0.386069
4 -0.952033 -0.794706 -1.141199 -0.070979 0.454453 0.544600 1.427005 1.918539 1.204102 0.076961 -0.328712 -1.051776 -0.151007
5 -1.244473 2.250723 2.321007 0.549219 1.971653 -1.427849 0.392314 -1.015093 0.157566 0.486970 -0.455070 0.229936 0.185742
6 -0.128652 0.958605 1.191477 0.705974 0.793937 -0.709130 -0.221572 0.922219 0.500475 -0.571099 0.521526 0.567361 -0.486761
7 -1.148662 0.562286 0.636059 0.222732 0.741470 0.009108 -0.949365 -0.486631 -0.519627 -0.739600 -0.415884 -0.026850 0.290685
8 -0.440190 0.461355 -0.016542 -0.158636 0.985626 -0.417310 0.182134 0.290631 0.379949 0.069159 1.080013 0.220566 -0.042505
9 -0.695730 0.398404 1.069978 -0.124019 0.736597 -0.912452 0.673156 0.604840 0.175505 0.496158 0.541788 0.649837 -0.680054
10 -0.006662 0.163832 1.373872 -0.095120 1.621755 1.048509 0.997122 0.721763 0.660834 -1.076324 0.925997 -0.147393 -0.420465
11 -0.771763 -0.484525 -0.874411 0.647747 -1.241650 0.190918 0.457290 0.915208 1.999689 1.879761 0.491598 -0.164372 -0.560754
12 0.140770 1.869847 -1.926303 -2.491201 -2.679759 -1.527330 -0.299345 -0.550878 0.702947 0.143961 0.034796 -0.379551 -0.422354
13 -1.952477 -0.949813 0.063314 1.188657 1.059601 1.221319 0.070346 2.284107 2.889527 2.012105 1.053494 -0.178905 -2.004333
14 -0.895529 0.398850 -0.469782 1.216393 0.657294 -0.550619 -0.854637 -0.815454 1.929689 1.499328 -0.096775 -0.174183 -1.119396
15 -1.161372 1.475106 1.486594 0.127516 0.213940 0.587080 -0.789652 0.130203 1.199389 1.458358 0.404206 0.754289 -0.784214
16 -0.476792 2.179287 0.101035 -1.393755 -0.740834 0.589666 0.873850 0.630539 0.535702 0.387326 -0.979677 0.259755 0.313358
17 -0.089088 -0.841832 0.674093 -0.842623 0.904577 -1.476862 1.853427 -1.108621 0.720923 0.383320 -1.842030 1.712321 -1.612726
18 -1.772732 0.488101 0.057829 0.041074 0.732429 1.052187 0.279830 -0.350521 -0.476338 -0.833438 0.184849 -0.055428 0.627307
19 -0.640351 0.068493 0.619966 -0.599171 0.860806 -0.385120 1.955087 -1.014740 1.224043 1.450896 -2.604448 2.187869 -0.464774
20 0.590240 0.699904 -0.097902 0.127319 -0.882999 0.319144 -0.146142 -0.540616 0.300593 0.688863 0.314647 0.709538 0.572811
21 0.500240 0.875222 -0.833826 0.377484 0.023480 1.321472 1.094037 0.734507 0.141947 0.214524 0.508556 -0.265911 -0.372316
22 -0.076653 0.518030 0.003390 0.452969 -0.218736 0.115409 0.332618 0.611098 0.211893 -0.206368 0.358363 0.614915 0.518172
23 0.010763 -0.352873 -0.460051 0.423968 -0.228393 -0.040296 -0.740869 -0.810034 -1.379366 -0.179024 0.147810 -0.224826 0.615011
24 0.874600 0.173728 -1.041125 0.845285 1.139221 0.264458 -0.378878 0.430226 -0.568469 -1.237333 0.032074 0.812111 0.431460
25 0.200637 0.337376 0.022126 1.189135 -0.210135 -1.195492 0.067874 1.349711 -0.534365 -0.132754 0.055132 0.239009 -0.275633
26 0.362627 0.159292 -1.211688 -0.555502 0.107540 0.797027 -0.246321 -1.113565 -1.373054 -2.369077 -0.539483 1.032005 1.637730
27 -0.504648 -0.561515 -2.173809 -1.525691 -0.810132 -0.617474 0.441103 1.146056 1.464488 -1.111032 -0.742722 0.034623 0.200147
28 -0.339646 -2.140319 -1.409226 -0.207553 -1.216547 -1.135346 -0.831817 1.136334 -0.187159 1.388841 0.282573 -0.807850 -0.371992
29 -1.279089 1.555887 0.890503 2.134195 0.337580 -0.037382 -2.046955 -2.888113 1.329665 1.436687 -1.576201 0.485256 1.429246
... ... ... ... ... ... ... ... ... ... ... ... ... ...
279 -1.132789 -0.931481 -0.350024 -0.228575 -1.201208 -1.044342 0.532403 1.667036 1.383485 -0.967474 -0.286625 -1.920618 -0.797190
280 -0.375948 0.058369 0.489068 0.862825 -1.876102 -0.195043 -1.163295 0.716190 0.384576 -0.168340 1.542126 -0.769460 0.456686
281 0.412883 -1.703432 -0.514845 -1.382818 -0.713972 -0.476089 1.471006 0.826485 0.508608 -1.311788 -2.010635 -1.122699 -0.848851
282 -0.152329 -2.012108 -0.217355 -1.122627 -0.851075 0.634424 1.711007 0.281350 -0.565156 -1.667195 -1.942452 -1.586592 -0.485128
283 0.348443 -2.381428 1.267515 -1.713290 0.161262 -1.589515 1.383857 -0.218429 0.412550 0.382171 -1.073499 -1.745128 -3.227845
284 -0.895866 1.001673 1.059356 0.166883 -0.710729 0.466737 -0.857566 -0.158962 0.004241 0.391823 0.576231 0.329506 -1.331272
285 0.417102 1.957515 2.350604 -1.125042 -2.206390 -0.674814 -1.217854 0.372865 0.840465 -0.472910 0.310419 1.379494 1.128412
286 -0.900897 -0.289100 0.433265 -0.281829 -0.379951 1.272236 0.313949 -0.261980 -0.053111 0.473694 0.493962 -0.263293 -0.657598
287 -0.002448 -0.853612 0.441903 0.406478 -0.823085 0.590185 -0.292046 -0.079952 -0.422138 0.579522 -0.620415 -0.298847 0.620798
288 -0.528092 -1.022206 -0.348679 0.093718 -1.642833 -2.355166 -0.992806 -0.143423 0.270521 0.838321 0.843686 0.469574 -0.325121
289 -0.387248 -1.305014 -0.365540 0.202745 -0.906016 -1.785190 -1.377992 -0.544742 -0.670979 -0.785606 0.505505 0.502505 -0.151297
290 0.425324 -2.583173 -2.181080 -1.262030 -0.179265 0.176164 1.763096 0.436737 -2.048534 -1.014266 1.298221 0.401742 -1.080608
291 -0.572282 -0.375532 -2.067885 -0.361247 -0.315065 -0.671820 -0.183865 -0.517694 -0.802956 -0.951809 0.282442 0.208005 -0.271252
292 -0.084382 -1.508230 -0.105496 -1.930204 -1.529664 -0.795467 1.273717 -1.858542 -0.446361 -0.239346 0.154464 -0.114937 -1.831603
293 -1.172703 0.783209 -1.141589 -0.982768 -0.513216 0.655437 1.962510 0.628858 1.130028 1.104741 1.539591 1.547843 -0.011302
294 -1.293038 0.838303 -1.049071 -0.708031 -0.779995 0.868108 1.621994 0.725495 1.173585 1.424395 1.751950 1.352876 0.339922
295 0.809878 -0.351504 -2.231752 -0.556719 -1.430264 -0.357918 -0.727837 1.110363 1.684188 0.429768 0.560061 0.371789 -1.110030
296 0.305449 -0.148924 -0.727054 -0.126830 0.467272 0.420013 1.212777 0.954055 -0.988419 -0.423614 -0.047239 0.058678 -0.031517
297 1.212224 1.916789 0.287969 -0.073842 0.289112 0.943764 -0.395404 -0.380613 0.262567 0.759137 0.277177 0.493951 1.026995
298 -0.121307 0.217217 0.030920 -0.201270 -0.752001 -0.276070 0.835502 -0.363704 -0.641199 0.283313 0.060013 0.013280 0.477857
299 -0.622824 -0.595352 0.256282 -0.111551 0.023990 1.221659 1.572998 -0.263983 -0.707828 0.707801 0.306249 1.046476 0.214979
300 -0.667480 -0.808638 0.730781 0.054549 0.191421 0.279885 0.088177 0.823617 0.604299 0.640274 -0.360151 1.298688 0.494875
301 0.928382 -2.375767 -0.427528 -0.852350 -1.137004 1.584181 -1.700220 -2.060965 -1.326622 0.451948 0.593212 0.152418 -0.128797
302 -0.483888 0.443846 0.129714 0.199624 -0.106985 0.817702 -0.072817 -1.163918 0.545762 -0.141320 0.041767 -0.402181 0.061897
303 0.715769 0.780533 1.467750 -0.595580 -1.178484 4.014345 -0.112339 -1.611382 -0.295511 0.032462 1.836607 -4.315898 -1.084441
304 0.041466 -0.470275 0.234655 0.109532 -0.518455 -0.977540 -0.613498 -1.108545 0.500653 -0.214143 -0.033265 -0.541673 0.714974
305 0.818747 0.495675 1.005686 0.967334 0.505171 -0.579478 -0.847677 1.574323 1.544556 0.412556 -0.972040 0.290457 0.289042
306 1.062928 -1.149587 1.951840 -0.065775 0.546680 0.994901 -1.817826 2.109742 0.264443 0.505287 -0.757462 0.578677 0.222503
307 -0.701621 -0.049803 -0.719153 -0.048069 1.223251 1.913492 0.887449 0.038186 0.546172 -0.568362 -1.091833 -0.250367 0.831399
308 -0.079821 0.796085 -0.215763 -1.396439 -0.133350 0.582037 2.442796 0.743250 -1.182753 -0.723658 -0.879934 -2.498899 -1.532262

309 rows × 13 columns

In [94]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
# `fit` returns the estimator itself, so the calls can be chained.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[94]:
[4016.9999999999995,
 3599.8293806720085,
 3349.349727264702,
 3159.6428991584926,
 3002.905895600155,
 2909.3645052598604,
 2804.695882234172,
 2720.275460001156,
 2622.695881163609,
 2543.6022931320426,
 2484.176525692807,
 2436.6681239209124,
 2402.363548718592,
 2338.201438573343]
In [95]:
# Elbow plot: inertia vs. number of clusters (k=2 is chosen below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[95]:
[<matplotlib.lines.Line2D at 0x1e82b4d8dd8>]

K=2

In [96]:
# Final clustering with the chosen k=2; fixed seed for reproducibility.
kmeans_mfcc = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[96]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [97]:
kmeans_mfcc.labels_
Out[97]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0,
       1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,
       1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,
       0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
       1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0])
In [98]:
# Predicting on the same data the model was fitted on; for KMeans this
# reproduces labels_ (the two outputs above/below are identical).
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[98]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0,
       1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,
       1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,
       0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
       1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0])
In [99]:
# Attach the cluster assignment and the ground-truth label to the feature
# matrix so the two can be cross-tabulated below.
# NOTE(review): this mutates X in place; later cells that need a features-only
# matrix must drop these columns again (as done with drop(columns='Cluster')).
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [100]:
X
Out[100]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.784459 0.109342 0.681608 1.151904 -0.707724 0.736365 0.241404 -0.461605 1.084621 0.123497 -0.150398 1.784532 0.824544 1 0
1 0.213444 0.453851 0.215705 0.987439 -1.851748 -0.001814 -0.218203 0.548263 -0.521851 1.253720 0.882518 -0.892913 0.218184 1 0
2 1.478029 0.664210 0.484232 0.450284 -0.427587 -0.448079 -0.195272 -0.610679 -0.675633 0.759098 -0.107303 -0.440790 -1.398093 0 0
3 -0.846386 -0.742706 -0.734786 -0.199585 -0.328948 -0.230911 0.620910 0.911236 1.274407 0.150882 -0.603865 -0.919849 0.386069 0 0
4 -0.952033 -0.794706 -1.141199 -0.070979 0.454453 0.544600 1.427005 1.918539 1.204102 0.076961 -0.328712 -1.051776 -0.151007 0 0
5 -1.244473 2.250723 2.321007 0.549219 1.971653 -1.427849 0.392314 -1.015093 0.157566 0.486970 -0.455070 0.229936 0.185742 1 0
6 -0.128652 0.958605 1.191477 0.705974 0.793937 -0.709130 -0.221572 0.922219 0.500475 -0.571099 0.521526 0.567361 -0.486761 1 0
7 -1.148662 0.562286 0.636059 0.222732 0.741470 0.009108 -0.949365 -0.486631 -0.519627 -0.739600 -0.415884 -0.026850 0.290685 1 0
8 -0.440190 0.461355 -0.016542 -0.158636 0.985626 -0.417310 0.182134 0.290631 0.379949 0.069159 1.080013 0.220566 -0.042505 1 0
9 -0.695730 0.398404 1.069978 -0.124019 0.736597 -0.912452 0.673156 0.604840 0.175505 0.496158 0.541788 0.649837 -0.680054 1 0
10 -0.006662 0.163832 1.373872 -0.095120 1.621755 1.048509 0.997122 0.721763 0.660834 -1.076324 0.925997 -0.147393 -0.420465 1 0
11 -0.771763 -0.484525 -0.874411 0.647747 -1.241650 0.190918 0.457290 0.915208 1.999689 1.879761 0.491598 -0.164372 -0.560754 1 0
12 0.140770 1.869847 -1.926303 -2.491201 -2.679759 -1.527330 -0.299345 -0.550878 0.702947 0.143961 0.034796 -0.379551 -0.422354 0 0
13 -1.952477 -0.949813 0.063314 1.188657 1.059601 1.221319 0.070346 2.284107 2.889527 2.012105 1.053494 -0.178905 -2.004333 1 0
14 -0.895529 0.398850 -0.469782 1.216393 0.657294 -0.550619 -0.854637 -0.815454 1.929689 1.499328 -0.096775 -0.174183 -1.119396 1 0
15 -1.161372 1.475106 1.486594 0.127516 0.213940 0.587080 -0.789652 0.130203 1.199389 1.458358 0.404206 0.754289 -0.784214 1 0
16 -0.476792 2.179287 0.101035 -1.393755 -0.740834 0.589666 0.873850 0.630539 0.535702 0.387326 -0.979677 0.259755 0.313358 1 0
17 -0.089088 -0.841832 0.674093 -0.842623 0.904577 -1.476862 1.853427 -1.108621 0.720923 0.383320 -1.842030 1.712321 -1.612726 0 0
18 -1.772732 0.488101 0.057829 0.041074 0.732429 1.052187 0.279830 -0.350521 -0.476338 -0.833438 0.184849 -0.055428 0.627307 1 0
19 -0.640351 0.068493 0.619966 -0.599171 0.860806 -0.385120 1.955087 -1.014740 1.224043 1.450896 -2.604448 2.187869 -0.464774 1 0
20 0.590240 0.699904 -0.097902 0.127319 -0.882999 0.319144 -0.146142 -0.540616 0.300593 0.688863 0.314647 0.709538 0.572811 1 0
21 0.500240 0.875222 -0.833826 0.377484 0.023480 1.321472 1.094037 0.734507 0.141947 0.214524 0.508556 -0.265911 -0.372316 1 0
22 -0.076653 0.518030 0.003390 0.452969 -0.218736 0.115409 0.332618 0.611098 0.211893 -0.206368 0.358363 0.614915 0.518172 1 0
23 0.010763 -0.352873 -0.460051 0.423968 -0.228393 -0.040296 -0.740869 -0.810034 -1.379366 -0.179024 0.147810 -0.224826 0.615011 1 0
24 0.874600 0.173728 -1.041125 0.845285 1.139221 0.264458 -0.378878 0.430226 -0.568469 -1.237333 0.032074 0.812111 0.431460 1 0
25 0.200637 0.337376 0.022126 1.189135 -0.210135 -1.195492 0.067874 1.349711 -0.534365 -0.132754 0.055132 0.239009 -0.275633 1 0
26 0.362627 0.159292 -1.211688 -0.555502 0.107540 0.797027 -0.246321 -1.113565 -1.373054 -2.369077 -0.539483 1.032005 1.637730 1 0
27 -0.504648 -0.561515 -2.173809 -1.525691 -0.810132 -0.617474 0.441103 1.146056 1.464488 -1.111032 -0.742722 0.034623 0.200147 0 0
28 -0.339646 -2.140319 -1.409226 -0.207553 -1.216547 -1.135346 -0.831817 1.136334 -0.187159 1.388841 0.282573 -0.807850 -0.371992 0 0
29 -1.279089 1.555887 0.890503 2.134195 0.337580 -0.037382 -2.046955 -2.888113 1.329665 1.436687 -1.576201 0.485256 1.429246 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
279 -1.132789 -0.931481 -0.350024 -0.228575 -1.201208 -1.044342 0.532403 1.667036 1.383485 -0.967474 -0.286625 -1.920618 -0.797190 0 1
280 -0.375948 0.058369 0.489068 0.862825 -1.876102 -0.195043 -1.163295 0.716190 0.384576 -0.168340 1.542126 -0.769460 0.456686 1 1
281 0.412883 -1.703432 -0.514845 -1.382818 -0.713972 -0.476089 1.471006 0.826485 0.508608 -1.311788 -2.010635 -1.122699 -0.848851 0 1
282 -0.152329 -2.012108 -0.217355 -1.122627 -0.851075 0.634424 1.711007 0.281350 -0.565156 -1.667195 -1.942452 -1.586592 -0.485128 0 1
283 0.348443 -2.381428 1.267515 -1.713290 0.161262 -1.589515 1.383857 -0.218429 0.412550 0.382171 -1.073499 -1.745128 -3.227845 0 1
284 -0.895866 1.001673 1.059356 0.166883 -0.710729 0.466737 -0.857566 -0.158962 0.004241 0.391823 0.576231 0.329506 -1.331272 1 1
285 0.417102 1.957515 2.350604 -1.125042 -2.206390 -0.674814 -1.217854 0.372865 0.840465 -0.472910 0.310419 1.379494 1.128412 1 1
286 -0.900897 -0.289100 0.433265 -0.281829 -0.379951 1.272236 0.313949 -0.261980 -0.053111 0.473694 0.493962 -0.263293 -0.657598 1 1
287 -0.002448 -0.853612 0.441903 0.406478 -0.823085 0.590185 -0.292046 -0.079952 -0.422138 0.579522 -0.620415 -0.298847 0.620798 1 1
288 -0.528092 -1.022206 -0.348679 0.093718 -1.642833 -2.355166 -0.992806 -0.143423 0.270521 0.838321 0.843686 0.469574 -0.325121 0 1
289 -0.387248 -1.305014 -0.365540 0.202745 -0.906016 -1.785190 -1.377992 -0.544742 -0.670979 -0.785606 0.505505 0.502505 -0.151297 0 1
290 0.425324 -2.583173 -2.181080 -1.262030 -0.179265 0.176164 1.763096 0.436737 -2.048534 -1.014266 1.298221 0.401742 -1.080608 0 1
291 -0.572282 -0.375532 -2.067885 -0.361247 -0.315065 -0.671820 -0.183865 -0.517694 -0.802956 -0.951809 0.282442 0.208005 -0.271252 0 1
292 -0.084382 -1.508230 -0.105496 -1.930204 -1.529664 -0.795467 1.273717 -1.858542 -0.446361 -0.239346 0.154464 -0.114937 -1.831603 0 1
293 -1.172703 0.783209 -1.141589 -0.982768 -0.513216 0.655437 1.962510 0.628858 1.130028 1.104741 1.539591 1.547843 -0.011302 1 1
294 -1.293038 0.838303 -1.049071 -0.708031 -0.779995 0.868108 1.621994 0.725495 1.173585 1.424395 1.751950 1.352876 0.339922 1 1
295 0.809878 -0.351504 -2.231752 -0.556719 -1.430264 -0.357918 -0.727837 1.110363 1.684188 0.429768 0.560061 0.371789 -1.110030 0 1
296 0.305449 -0.148924 -0.727054 -0.126830 0.467272 0.420013 1.212777 0.954055 -0.988419 -0.423614 -0.047239 0.058678 -0.031517 0 1
297 1.212224 1.916789 0.287969 -0.073842 0.289112 0.943764 -0.395404 -0.380613 0.262567 0.759137 0.277177 0.493951 1.026995 1 1
298 -0.121307 0.217217 0.030920 -0.201270 -0.752001 -0.276070 0.835502 -0.363704 -0.641199 0.283313 0.060013 0.013280 0.477857 1 1
299 -0.622824 -0.595352 0.256282 -0.111551 0.023990 1.221659 1.572998 -0.263983 -0.707828 0.707801 0.306249 1.046476 0.214979 1 1
300 -0.667480 -0.808638 0.730781 0.054549 0.191421 0.279885 0.088177 0.823617 0.604299 0.640274 -0.360151 1.298688 0.494875 1 1
301 0.928382 -2.375767 -0.427528 -0.852350 -1.137004 1.584181 -1.700220 -2.060965 -1.326622 0.451948 0.593212 0.152418 -0.128797 0 1
302 -0.483888 0.443846 0.129714 0.199624 -0.106985 0.817702 -0.072817 -1.163918 0.545762 -0.141320 0.041767 -0.402181 0.061897 1 1
303 0.715769 0.780533 1.467750 -0.595580 -1.178484 4.014345 -0.112339 -1.611382 -0.295511 0.032462 1.836607 -4.315898 -1.084441 1 1
304 0.041466 -0.470275 0.234655 0.109532 -0.518455 -0.977540 -0.613498 -1.108545 0.500653 -0.214143 -0.033265 -0.541673 0.714974 1 1
305 0.818747 0.495675 1.005686 0.967334 0.505171 -0.579478 -0.847677 1.574323 1.544556 0.412556 -0.972040 0.290457 0.289042 1 1
306 1.062928 -1.149587 1.951840 -0.065775 0.546680 0.994901 -1.817826 2.109742 0.264443 0.505287 -0.757462 0.578677 0.222503 1 1
307 -0.701621 -0.049803 -0.719153 -0.048069 1.223251 1.913492 0.887449 0.038186 0.546172 -0.568362 -1.091833 -0.250367 0.831399 1 1
308 -0.079821 0.796085 -0.215763 -1.396439 -0.133350 0.582037 2.442796 0.743250 -1.182753 -0.723658 -0.879934 -2.498899 -1.532262 0 1

309 rows × 15 columns

In [101]:
# Contingency of true playlist membership ('chosen') vs. cluster label,
# drawn as a stacked bar chart with one bar per cluster.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[101]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82b505400>
In [102]:
# NOTE(review): import placed mid-notebook; consider moving it to the
# top-of-notebook import cell.
from IPython.display import display, Markdown, Latex
# Render the current company's name as a level-2 markdown heading.
display(Markdown('## '+companies[3]))

Hotel Marrakech

ANN

In [103]:
X = df_n_ps_std_mfcc[3].drop(columns='Cluster')
In [104]:
y = df_n_ps[3]['chosen']
In [105]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [106]:
X_train.shape
Out[106]:
(139, 13)
In [107]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [108]:
# Candidate hyperparameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the grid (its entry is commented out below).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [109]:
import time
start = time.time()  # wall-clock start, to report total tuning time below

np.random.seed(1234)
# Parameter grid passed to GridSearchCV (batch_size intentionally left out).
parametros = {'activation': activation_vec,
              'max_iter': max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track Cohen's kappa alongside accuracy; the best model is refit on accuracy.
scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
# `iid=True` was the (deprecated) default in old scikit-learn, deprecated in
# 0.22 and removed in 0.24 — dropping it preserves behavior here and keeps
# this cell runnable on current scikit-learn versions.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [110]:
# Run the full grid search (slow — roughly 21 minutes on this data, per the log).
grid.fit(X_train, y_train)

# Report the best hyperparameters, CV accuracy (%) and CV kappa (%).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time right after model tuning finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.003, 'max_iter': 75}, que permiten obtener un Accuracy de 86.33% y un Kappa del 66.54
Tiempo total: 21.38 minutos
In [111]:
# Architecture for the Keras replica of the tuned MLP: input width, the tuned
# hidden-layer widths, and a single-unit output layer.
n0 = X_train.shape[1]
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [112]:
input_tensor = Input(shape = (n0,))
In [113]:
# Use the activation the grid search actually selected instead of a hardcoded
# 'tanh' (the grid above chose 'relu'); map sklearn activation names to their
# Keras equivalents ('identity' has no exact counterpart, maps to 'linear').
keras_activation = {'logistic': 'sigmoid', 'relu': 'relu', 'tanh': 'tanh',
                    'identity': 'linear'}[grid.best_params_['activation']]

# Chain the hidden Dense layers; the last entry of ns (size 1) is the output.
hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=keras_activation)(hidden_outputs[i]))

# Sigmoid output for the binary 'chosen' target.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [114]:
# Assemble the functional model and snapshot the freshly-initialized weights
# (restored via set_weights below so training starts from this initialization).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [115]:
model.summary()
Model: "model_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_4 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_10 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_11 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_12 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_13 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,261
Trainable params: 1,261
Non-trainable params: 0
_________________________________________________________________
In [116]:
# Restore the initial weights, then train with Adam at the tuned learning
# rate, halving the LR whenever validation accuracy plateaus.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
)
history = model.fit(
    X_train, y_train,
    epochs=epochs,
    validation_data=(X_test, y_test),
    batch_size=32,
    callbacks=[reduce_lr],
)
Train on 139 samples, validate on 47 samples
Epoch 1/75
139/139 [==============================] - 0s 2ms/step - loss: 0.7120 - accuracy: 0.4604 - val_loss: 0.6643 - val_accuracy: 0.6383
Epoch 2/75
139/139 [==============================] - 0s 101us/step - loss: 0.5930 - accuracy: 0.7410 - val_loss: 0.6515 - val_accuracy: 0.6809
Epoch 3/75
139/139 [==============================] - 0s 93us/step - loss: 0.5191 - accuracy: 0.7626 - val_loss: 0.6451 - val_accuracy: 0.7234
Epoch 4/75
139/139 [==============================] - 0s 86us/step - loss: 0.4626 - accuracy: 0.7842 - val_loss: 0.6396 - val_accuracy: 0.7872
Epoch 5/75
139/139 [==============================] - 0s 86us/step - loss: 0.4188 - accuracy: 0.8201 - val_loss: 0.6337 - val_accuracy: 0.7660
Epoch 6/75
139/139 [==============================] - 0s 93us/step - loss: 0.3836 - accuracy: 0.8489 - val_loss: 0.6403 - val_accuracy: 0.7660
Epoch 7/75
139/139 [==============================] - 0s 101us/step - loss: 0.3544 - accuracy: 0.8705 - val_loss: 0.6423 - val_accuracy: 0.7660
Epoch 8/75
139/139 [==============================] - 0s 101us/step - loss: 0.3328 - accuracy: 0.8705 - val_loss: 0.6390 - val_accuracy: 0.7660
Epoch 9/75
139/139 [==============================] - 0s 101us/step - loss: 0.3117 - accuracy: 0.8777 - val_loss: 0.6345 - val_accuracy: 0.7660
Epoch 10/75
139/139 [==============================] - 0s 93us/step - loss: 0.3017 - accuracy: 0.8705 - val_loss: 0.6233 - val_accuracy: 0.7660
Epoch 11/75
139/139 [==============================] - 0s 129us/step - loss: 0.2924 - accuracy: 0.8849 - val_loss: 0.6335 - val_accuracy: 0.7872
Epoch 12/75
139/139 [==============================] - 0s 93us/step - loss: 0.2825 - accuracy: 0.8777 - val_loss: 0.6348 - val_accuracy: 0.7872
Epoch 13/75
139/139 [==============================] - 0s 86us/step - loss: 0.2727 - accuracy: 0.8777 - val_loss: 0.6450 - val_accuracy: 0.7872
Epoch 14/75
139/139 [==============================] - 0s 93us/step - loss: 0.2655 - accuracy: 0.8993 - val_loss: 0.6622 - val_accuracy: 0.7872

Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 15/75
139/139 [==============================] - 0s 93us/step - loss: 0.2583 - accuracy: 0.9065 - val_loss: 0.6709 - val_accuracy: 0.7872
Epoch 16/75
139/139 [==============================] - 0s 86us/step - loss: 0.2544 - accuracy: 0.9065 - val_loss: 0.6659 - val_accuracy: 0.7872
Epoch 17/75
139/139 [==============================] - 0s 101us/step - loss: 0.2510 - accuracy: 0.9065 - val_loss: 0.6681 - val_accuracy: 0.7660
Epoch 18/75
139/139 [==============================] - 0s 93us/step - loss: 0.2473 - accuracy: 0.9137 - val_loss: 0.6822 - val_accuracy: 0.7660
Epoch 19/75
139/139 [==============================] - 0s 86us/step - loss: 0.2434 - accuracy: 0.9137 - val_loss: 0.6909 - val_accuracy: 0.7660
Epoch 20/75
139/139 [==============================] - 0s 79us/step - loss: 0.2402 - accuracy: 0.9137 - val_loss: 0.6924 - val_accuracy: 0.7660
Epoch 21/75
139/139 [==============================] - 0s 137us/step - loss: 0.2373 - accuracy: 0.9137 - val_loss: 0.6904 - val_accuracy: 0.7660
Epoch 22/75
139/139 [==============================] - 0s 101us/step - loss: 0.2347 - accuracy: 0.9137 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 23/75
139/139 [==============================] - 0s 93us/step - loss: 0.2313 - accuracy: 0.9137 - val_loss: 0.6854 - val_accuracy: 0.7660
Epoch 24/75
139/139 [==============================] - 0s 86us/step - loss: 0.2278 - accuracy: 0.9137 - val_loss: 0.6835 - val_accuracy: 0.7660

Epoch 00024: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 25/75
139/139 [==============================] - 0s 86us/step - loss: 0.2249 - accuracy: 0.9137 - val_loss: 0.6845 - val_accuracy: 0.7660
Epoch 26/75
139/139 [==============================] - 0s 101us/step - loss: 0.2242 - accuracy: 0.9137 - val_loss: 0.6798 - val_accuracy: 0.7660
Epoch 27/75
139/139 [==============================] - 0s 93us/step - loss: 0.2229 - accuracy: 0.9137 - val_loss: 0.6763 - val_accuracy: 0.7660
Epoch 28/75
139/139 [==============================] - 0s 101us/step - loss: 0.2213 - accuracy: 0.9137 - val_loss: 0.6758 - val_accuracy: 0.7660
Epoch 29/75
139/139 [==============================] - 0s 86us/step - loss: 0.2198 - accuracy: 0.9137 - val_loss: 0.6758 - val_accuracy: 0.7660
Epoch 30/75
139/139 [==============================] - 0s 86us/step - loss: 0.2179 - accuracy: 0.9209 - val_loss: 0.6753 - val_accuracy: 0.7660
Epoch 31/75
139/139 [==============================] - 0s 79us/step - loss: 0.2163 - accuracy: 0.9209 - val_loss: 0.6761 - val_accuracy: 0.7660
Epoch 32/75
139/139 [==============================] - 0s 86us/step - loss: 0.2150 - accuracy: 0.9209 - val_loss: 0.6781 - val_accuracy: 0.7660
Epoch 33/75
139/139 [==============================] - 0s 122us/step - loss: 0.2136 - accuracy: 0.9281 - val_loss: 0.6806 - val_accuracy: 0.7660
Epoch 34/75
139/139 [==============================] - 0s 158us/step - loss: 0.2121 - accuracy: 0.9281 - val_loss: 0.6834 - val_accuracy: 0.7660

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 35/75
139/139 [==============================] - 0s 129us/step - loss: 0.2106 - accuracy: 0.9281 - val_loss: 0.6837 - val_accuracy: 0.7660
Epoch 36/75
139/139 [==============================] - 0s 108us/step - loss: 0.2099 - accuracy: 0.9281 - val_loss: 0.6845 - val_accuracy: 0.7660
Epoch 37/75
139/139 [==============================] - 0s 108us/step - loss: 0.2092 - accuracy: 0.9281 - val_loss: 0.6846 - val_accuracy: 0.7660
Epoch 38/75
139/139 [==============================] - 0s 93us/step - loss: 0.2085 - accuracy: 0.9281 - val_loss: 0.6834 - val_accuracy: 0.7660
Epoch 39/75
139/139 [==============================] - 0s 93us/step - loss: 0.2078 - accuracy: 0.9281 - val_loss: 0.6844 - val_accuracy: 0.7660
Epoch 40/75
139/139 [==============================] - 0s 93us/step - loss: 0.2070 - accuracy: 0.9281 - val_loss: 0.6853 - val_accuracy: 0.7660
Epoch 41/75
139/139 [==============================] - 0s 93us/step - loss: 0.2064 - accuracy: 0.9281 - val_loss: 0.6865 - val_accuracy: 0.7660
Epoch 42/75
139/139 [==============================] - 0s 93us/step - loss: 0.2055 - accuracy: 0.9281 - val_loss: 0.6884 - val_accuracy: 0.7660
Epoch 43/75
139/139 [==============================] - 0s 93us/step - loss: 0.2050 - accuracy: 0.9281 - val_loss: 0.6905 - val_accuracy: 0.7660
Epoch 44/75
139/139 [==============================] - 0s 93us/step - loss: 0.2046 - accuracy: 0.9281 - val_loss: 0.6924 - val_accuracy: 0.7660

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 45/75
139/139 [==============================] - 0s 129us/step - loss: 0.2036 - accuracy: 0.9281 - val_loss: 0.6908 - val_accuracy: 0.7660
Epoch 46/75
139/139 [==============================] - 0s 108us/step - loss: 0.2031 - accuracy: 0.9281 - val_loss: 0.6889 - val_accuracy: 0.7660
Epoch 47/75
139/139 [==============================] - 0s 86us/step - loss: 0.2027 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 48/75
139/139 [==============================] - 0s 72us/step - loss: 0.2024 - accuracy: 0.9281 - val_loss: 0.6861 - val_accuracy: 0.7660
Epoch 49/75
139/139 [==============================] - 0s 93us/step - loss: 0.2021 - accuracy: 0.9281 - val_loss: 0.6861 - val_accuracy: 0.7660
Epoch 50/75
139/139 [==============================] - 0s 79us/step - loss: 0.2016 - accuracy: 0.9281 - val_loss: 0.6864 - val_accuracy: 0.7660
Epoch 51/75
139/139 [==============================] - 0s 86us/step - loss: 0.2013 - accuracy: 0.9281 - val_loss: 0.6864 - val_accuracy: 0.7660
Epoch 52/75
139/139 [==============================] - 0s 79us/step - loss: 0.2010 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 53/75
139/139 [==============================] - 0s 79us/step - loss: 0.2006 - accuracy: 0.9281 - val_loss: 0.6876 - val_accuracy: 0.7660
Epoch 54/75
139/139 [==============================] - 0s 72us/step - loss: 0.2003 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660

Epoch 00054: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 55/75
139/139 [==============================] - 0s 72us/step - loss: 0.1998 - accuracy: 0.9281 - val_loss: 0.6875 - val_accuracy: 0.7660
Epoch 56/75
139/139 [==============================] - 0s 122us/step - loss: 0.1997 - accuracy: 0.9281 - val_loss: 0.6872 - val_accuracy: 0.7660
Epoch 57/75
139/139 [==============================] - 0s 86us/step - loss: 0.1995 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 58/75
139/139 [==============================] - 0s 79us/step - loss: 0.1993 - accuracy: 0.9281 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 59/75
139/139 [==============================] - 0s 79us/step - loss: 0.1991 - accuracy: 0.9281 - val_loss: 0.6880 - val_accuracy: 0.7660
Epoch 60/75
139/139 [==============================] - 0s 72us/step - loss: 0.1990 - accuracy: 0.9281 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 61/75
139/139 [==============================] - 0s 79us/step - loss: 0.1987 - accuracy: 0.9281 - val_loss: 0.6869 - val_accuracy: 0.7660
Epoch 62/75
139/139 [==============================] - 0s 79us/step - loss: 0.1986 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 63/75
139/139 [==============================] - 0s 72us/step - loss: 0.1983 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 64/75
139/139 [==============================] - 0s 79us/step - loss: 0.1981 - accuracy: 0.9281 - val_loss: 0.6868 - val_accuracy: 0.7660

Epoch 00064: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 65/75
139/139 [==============================] - 0s 79us/step - loss: 0.1980 - accuracy: 0.9281 - val_loss: 0.6868 - val_accuracy: 0.7660
Epoch 66/75
139/139 [==============================] - 0s 79us/step - loss: 0.1979 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 67/75
139/139 [==============================] - 0s 79us/step - loss: 0.1978 - accuracy: 0.9281 - val_loss: 0.6866 - val_accuracy: 0.7660
Epoch 68/75
139/139 [==============================] - 0s 101us/step - loss: 0.1977 - accuracy: 0.9281 - val_loss: 0.6866 - val_accuracy: 0.7660
Epoch 69/75
139/139 [==============================] - 0s 122us/step - loss: 0.1977 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 70/75
139/139 [==============================] - 0s 86us/step - loss: 0.1975 - accuracy: 0.9281 - val_loss: 0.6870 - val_accuracy: 0.7660
Epoch 71/75
139/139 [==============================] - 0s 86us/step - loss: 0.1974 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 72/75
139/139 [==============================] - 0s 72us/step - loss: 0.1973 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660
Epoch 73/75
139/139 [==============================] - 0s 79us/step - loss: 0.1972 - accuracy: 0.9281 - val_loss: 0.6872 - val_accuracy: 0.7660
Epoch 74/75
139/139 [==============================] - 0s 79us/step - loss: 0.1972 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660

Epoch 00074: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 75/75
139/139 [==============================] - 0s 93us/step - loss: 0.1971 - accuracy: 0.9281 - val_loss: 0.6875 - val_accuracy: 0.7660
In [117]:
# Learning curves from the Keras training history.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Distinct name so we don't clobber the integer `epochs` (the tuned
# max_iter) defined in an earlier cell — reusing the name was a
# hidden-state hazard. Leftover debug `print(epochs)` removed.
epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()
range(0, 75)
In [118]:
# Final held-out evaluation of the Keras model.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
47/47 [==============================] - 0s 106us/step
test loss: 0.687487561017909, test accuracy: 0.7659574747085571
In [119]:
# ROC AUC is computed on the raw sigmoid scores (no thresholding needed).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7903225806451613
In [120]:
# Binarize the sigmoid scores at 0.5 before computing Cohen's kappa.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.396732788798133

KMeans

In [121]:
X
Out[121]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.311006 1.696486 0.912001 -0.211934 -0.513557 1.357699 0.777385 0.508585 -2.290902 -2.422249 -0.738438 -2.221933 -1.191363
1 0.947147 -0.576741 -1.258913 -0.786859 0.887961 -1.895175 -0.310427 -0.374360 1.478062 0.972075 -1.105984 0.612318 -1.486887
2 -0.115048 1.257050 0.043002 -2.677464 3.902183 -1.091787 0.505797 2.341684 -2.345224 -1.678088 -2.608854 -2.617777 -2.135652
3 0.621032 1.417449 1.399722 -0.625673 1.012110 0.230671 -0.287988 1.012771 -2.250326 -0.340971 -0.353905 -0.717440 -0.390485
4 0.340978 1.662814 -1.775422 0.156552 1.678811 0.301711 2.038462 1.511985 1.508787 -2.046602 0.741073 -0.282747 -0.779814
5 0.426765 -1.056701 -1.244088 -0.696846 -0.372415 -0.847420 -0.209607 1.462924 -0.541420 0.000628 -1.135148 1.608546 1.709532
6 0.210857 -1.779497 -2.206121 -0.832640 0.636169 -1.979858 -0.510102 1.437770 0.128209 0.025521 0.184211 2.300204 0.912793
7 -0.821293 -0.049796 0.237440 0.379918 0.714133 0.670070 0.122605 -0.069298 -0.126759 -0.199559 0.547891 -0.099623 -0.024895
8 0.420103 -0.662020 -0.550543 -0.566406 -0.923203 -0.295152 -0.533234 0.927026 0.119135 0.218761 -0.245778 0.627242 1.313952
9 -1.436247 0.435343 2.482690 1.099668 -0.392845 0.565039 0.569531 -0.088218 -0.131137 -0.699769 -0.538549 -0.329443 0.942919
10 -1.574051 -1.334372 -1.636184 1.768991 -0.369456 -0.008046 -1.402331 0.012625 1.135935 1.623145 -0.653935 0.182348 1.052310
11 -1.798986 -1.632467 -1.314854 2.656006 -0.096678 -0.174852 -1.748372 0.185804 0.930317 0.365776 -0.676448 0.358271 1.523770
12 -0.708207 0.931180 0.258840 -0.189291 -0.204832 -0.103872 0.221697 -0.231695 -0.003439 0.423528 1.259835 0.119625 -0.192417
13 -2.007033 -0.288096 0.099713 0.390909 1.333138 -0.069950 0.643074 0.172080 -0.109666 0.304475 -1.157528 -1.708326 -1.420079
14 -0.497985 0.020592 -0.123619 0.165046 -0.765078 -0.465219 0.172533 0.722853 0.284863 -0.035284 0.024769 -0.065990 -0.992437
15 1.200625 0.984580 -0.234312 0.348855 0.175663 0.309396 0.390611 -0.745912 -0.667554 -0.052439 0.119610 -0.862930 0.945979
16 0.435253 3.280178 0.407736 1.143148 2.291571 0.546530 0.170667 0.427708 -0.063936 -0.532360 0.404150 0.415849 0.869331
17 -0.398944 0.035026 -1.634042 -1.354378 0.854385 1.406182 -0.773335 0.663902 0.928496 1.278830 0.464511 0.235475 -0.040374
18 -0.454008 -0.234096 -0.930672 -0.507506 0.545773 0.437756 1.026910 0.013959 -0.620099 -0.593763 1.073690 0.594340 0.987056
19 0.149846 0.062252 -0.002122 0.786346 0.810930 0.304880 -0.882886 -0.043156 2.503584 0.894947 0.394981 0.761651 0.402963
20 -0.314274 0.446482 0.889744 0.891114 1.249237 0.718469 0.296834 -0.831548 -0.393364 -0.103574 0.295790 0.092061 0.424633
21 0.659365 1.053258 -0.877939 -0.295954 -1.122110 -0.035202 1.512616 0.031457 -0.700740 -1.687204 -1.136215 -1.545451 -0.082548
22 0.568507 -0.357318 -1.183577 -0.069205 0.462644 -0.956011 0.501504 0.240708 -0.025482 0.416003 0.237690 -0.566935 -0.846151
23 0.696474 0.477607 -1.637469 -1.158983 -2.224208 -1.861929 -0.176558 0.694585 0.426826 -0.088376 -0.335290 1.125320 0.705700
24 -0.221795 -0.513464 -0.506448 0.594506 0.033232 -1.141879 -1.582503 -0.081204 -0.001962 -0.704687 -0.473528 0.580117 1.533686
25 0.036099 -0.007586 0.116729 0.438081 -1.526141 -1.994283 -1.014100 0.028630 -0.553238 -0.540795 0.467730 0.943285 0.498193
26 -0.291576 -0.372192 -1.176599 0.078535 0.516288 -1.851892 -2.218803 0.335200 0.323222 0.006649 0.017717 0.133172 1.208725
27 0.953536 0.427304 -0.554063 0.425439 1.368674 0.362392 0.477030 -0.976616 -0.382390 0.310619 -0.903078 -0.943886 -0.047616
28 -1.172014 1.307258 -1.059323 -0.655908 1.591107 0.483432 0.474862 0.348014 -0.527448 0.798802 -0.075253 1.943808 0.108268
29 -0.954427 0.000731 -0.367958 0.281024 0.303337 0.744504 1.271647 0.298340 -0.057042 -0.297712 -0.053703 -0.045043 -0.561554
... ... ... ... ... ... ... ... ... ... ... ... ... ...
156 0.232363 -1.167339 -0.114632 1.240724 -0.209611 0.597503 -0.105216 -0.393018 -0.168804 -0.038601 0.602075 -0.482733 0.192333
157 -1.686193 -0.806140 -0.531342 -0.411912 0.312945 0.751058 0.624837 -0.394463 0.549120 -1.174079 -1.374572 -1.950144 -0.652535
158 0.487798 1.116042 -0.308817 0.175231 -0.191701 -0.682970 0.502123 0.749073 1.365476 0.198244 1.283992 0.132188 0.482532
159 1.049575 0.742765 0.000505 0.670386 0.235663 -0.297404 0.891743 0.047729 0.086633 0.873400 0.552393 0.496793 0.659122
160 0.285967 0.602916 -0.009050 0.802464 0.333031 -1.182611 0.473870 0.896236 0.890391 0.208214 0.786475 0.044481 -0.114927
161 2.568510 -0.180837 0.794882 1.410838 0.898076 0.468184 0.963255 0.338074 2.081580 2.353196 0.146660 -0.295606 -0.020484
162 0.821849 0.906757 0.282262 0.304716 -0.691824 0.772704 2.543328 -0.404440 1.861464 1.635426 0.204673 0.084333 0.469447
163 2.581037 0.239015 1.212048 0.498566 0.095720 0.062469 3.463238 0.374969 -0.054235 -0.365031 -0.169020 1.160964 0.666076
164 -0.247271 -0.874145 -0.840584 0.233138 0.034101 0.259892 0.144353 -0.570094 1.244117 0.282845 0.127444 -0.721587 -1.450860
165 0.188979 -0.519200 0.108496 -0.513645 -0.637646 0.812515 0.626360 -0.156977 -0.092241 -0.517923 0.026563 -0.597616 -0.101096
166 -0.015438 -0.656621 -0.739614 0.302131 0.583862 0.465267 0.342075 -0.318902 0.221544 0.654368 0.777463 -0.462212 -0.867288
167 -1.567081 -1.052883 -0.417918 0.636963 -0.531279 0.787238 -1.913461 -0.020653 -0.111129 0.112259 -0.380422 0.497894 0.709826
168 -1.883530 -0.172892 -0.340073 -0.255266 -0.480237 -0.061425 -0.158589 -0.308725 -0.034923 0.150845 0.696367 0.704196 0.473391
169 -1.577057 -0.602693 0.448785 1.073850 -0.714538 1.427240 -1.645225 0.812069 -0.019466 -0.719024 -0.991241 0.521497 0.461555
170 0.667824 -0.298287 -0.412356 -1.154598 0.171532 -0.341146 -0.411827 -1.296671 0.428160 -0.233124 0.035111 -0.934501 0.123701
171 0.626343 -0.046406 -0.168999 -1.278941 0.502027 -0.060296 0.062571 -1.284727 0.028380 -0.488598 -0.255008 -1.199407 0.088115
172 -0.441233 -0.987406 0.015664 -2.982951 -0.379800 1.485748 -2.174788 -3.521120 -0.191502 2.486906 2.771782 0.789523 0.553119
173 0.331514 0.334707 -0.187508 0.489055 1.133140 1.016598 -0.572753 -0.634721 -0.567790 -0.492410 0.634219 -0.122575 -0.205540
174 0.480131 0.345645 0.202709 -0.423456 1.184414 2.116965 -0.463050 0.212550 1.973473 -0.996794 0.924229 -0.170049 -0.068464
175 1.039410 -0.773764 0.113739 -0.796036 -1.053802 -1.238009 0.153897 0.497600 1.347261 0.972165 0.993095 0.066125 -0.069772
176 0.194148 -0.229033 -0.571129 -0.704359 -0.204400 -0.273049 1.105329 0.052851 -0.360196 0.099095 1.063628 -0.266594 0.041526
177 -0.169188 0.025195 -0.189648 0.376353 0.802036 -1.185140 0.488985 0.244963 1.305631 0.241661 0.402415 -0.494815 0.252206
178 1.433007 0.217051 -0.388425 -1.158798 2.068592 1.278810 -1.193547 -0.909321 -0.207122 2.062093 1.374797 0.383804 1.569650
179 1.371536 0.624596 -0.082552 0.444824 1.959112 -0.736647 -1.448177 0.624897 1.304939 -0.025270 -0.519401 0.592135 1.312240
180 0.815061 -1.210119 0.844643 -1.152602 -0.216878 -1.573232 -0.065062 2.136014 -0.285964 1.827988 -0.982121 1.139199 0.936226
181 0.814962 -1.028970 -1.340094 -1.579784 0.774822 -0.351654 -2.148181 2.772395 1.638263 -0.394371 1.796246 1.182459 0.824064
182 1.615277 0.706391 -0.611277 0.513438 0.987249 1.226124 0.240966 0.485917 1.355615 -0.480955 -0.255325 -0.370864 0.107591
183 0.290224 0.578762 0.024629 0.119894 0.626180 1.025427 0.180541 -0.504388 -1.085411 -1.413825 0.811722 0.640653 0.433677
184 0.086408 -1.394139 -0.501233 1.251905 -0.481983 0.026482 -1.317983 -0.580623 -0.160381 -0.718194 0.110108 -0.183905 0.074891
185 0.024909 -0.713904 -1.235134 -0.194562 0.155358 -0.586587 -0.455970 0.577457 1.172268 0.468799 0.500130 1.133624 0.192845

186 rows × 13 columns

In [122]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_
        for k in range(1, 15)]
WSSs
Out[122]:
[2418.0,
 2172.1027618675616,
 1998.025251732407,
 1881.5714074571665,
 1790.4988909951144,
 1710.5509624175916,
 1638.1160961205187,
 1594.2979605381602,
 1541.5219098889218,
 1492.348355544395,
 1435.6626781919845,
 1397.526425851007,
 1339.3046499586067,
 1333.0833102955885]
In [123]:
# Elbow plot — look for the bend to pick k (k=3 is chosen below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[123]:
[<matplotlib.lines.Line2D at 0x1e82cc61860>]

K=3

In [124]:
# Final clustering with the elbow-selected k=3 (seeded for reproducibility).
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[124]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [125]:
kmeans_mfcc.labels_
Out[125]:
array([1, 2, 1, 1, 0, 2, 2, 0, 2, 0, 2, 2, 0, 1, 1, 0, 0, 2, 0, 0, 0, 1,
       1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 1, 1, 1,
       1, 2, 0, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 2, 1, 2,
       2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 1, 0, 2, 1, 0, 2, 1, 0, 1, 2, 2,
       0, 1, 1, 0, 1, 0, 2, 2, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 2, 1, 1, 1,
       2, 2, 2, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 1, 0, 2, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 0,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 2, 0, 0, 2,
       1, 0, 0, 0, 2, 2, 0, 0, 2, 2])
In [126]:
# For the exact data the model was fitted on, `labels_` already holds the
# cluster assignment (identical to the Out[125]/Out[126] arrays above), so
# calling predict(X) again is a redundant recomputation.
clusters_mfcc = kmeans_mfcc.labels_
clusters_mfcc
Out[126]:
array([1, 2, 1, 1, 0, 2, 2, 0, 2, 0, 2, 2, 0, 1, 1, 0, 0, 2, 0, 0, 0, 1,
       1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 1, 1, 1,
       1, 2, 0, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 2, 1, 2,
       2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 1, 0, 2, 1, 0, 2, 1, 0, 1, 2, 2,
       0, 1, 1, 0, 1, 0, 2, 2, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 2, 1, 1, 1,
       2, 2, 2, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 1, 0, 2, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 0,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 2, 0, 0, 2,
       1, 0, 0, 0, 2, 2, 0, 0, 2, 2])
In [127]:
# NOTE(review): mutates X in place — earlier displays of X are now stale.
X.loc[:,'Cluster'] = clusters_mfcc
# list(y) discards y's index so the assignment is positional, not index-aligned.
X.loc[:,'chosen'] = list(y)
In [128]:
X
Out[128]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.311006 1.696486 0.912001 -0.211934 -0.513557 1.357699 0.777385 0.508585 -2.290902 -2.422249 -0.738438 -2.221933 -1.191363 1 0
1 0.947147 -0.576741 -1.258913 -0.786859 0.887961 -1.895175 -0.310427 -0.374360 1.478062 0.972075 -1.105984 0.612318 -1.486887 2 0
2 -0.115048 1.257050 0.043002 -2.677464 3.902183 -1.091787 0.505797 2.341684 -2.345224 -1.678088 -2.608854 -2.617777 -2.135652 1 0
3 0.621032 1.417449 1.399722 -0.625673 1.012110 0.230671 -0.287988 1.012771 -2.250326 -0.340971 -0.353905 -0.717440 -0.390485 1 0
4 0.340978 1.662814 -1.775422 0.156552 1.678811 0.301711 2.038462 1.511985 1.508787 -2.046602 0.741073 -0.282747 -0.779814 0 0
5 0.426765 -1.056701 -1.244088 -0.696846 -0.372415 -0.847420 -0.209607 1.462924 -0.541420 0.000628 -1.135148 1.608546 1.709532 2 0
6 0.210857 -1.779497 -2.206121 -0.832640 0.636169 -1.979858 -0.510102 1.437770 0.128209 0.025521 0.184211 2.300204 0.912793 2 0
7 -0.821293 -0.049796 0.237440 0.379918 0.714133 0.670070 0.122605 -0.069298 -0.126759 -0.199559 0.547891 -0.099623 -0.024895 0 0
8 0.420103 -0.662020 -0.550543 -0.566406 -0.923203 -0.295152 -0.533234 0.927026 0.119135 0.218761 -0.245778 0.627242 1.313952 2 0
9 -1.436247 0.435343 2.482690 1.099668 -0.392845 0.565039 0.569531 -0.088218 -0.131137 -0.699769 -0.538549 -0.329443 0.942919 0 0
10 -1.574051 -1.334372 -1.636184 1.768991 -0.369456 -0.008046 -1.402331 0.012625 1.135935 1.623145 -0.653935 0.182348 1.052310 2 0
11 -1.798986 -1.632467 -1.314854 2.656006 -0.096678 -0.174852 -1.748372 0.185804 0.930317 0.365776 -0.676448 0.358271 1.523770 2 0
12 -0.708207 0.931180 0.258840 -0.189291 -0.204832 -0.103872 0.221697 -0.231695 -0.003439 0.423528 1.259835 0.119625 -0.192417 0 0
13 -2.007033 -0.288096 0.099713 0.390909 1.333138 -0.069950 0.643074 0.172080 -0.109666 0.304475 -1.157528 -1.708326 -1.420079 1 0
14 -0.497985 0.020592 -0.123619 0.165046 -0.765078 -0.465219 0.172533 0.722853 0.284863 -0.035284 0.024769 -0.065990 -0.992437 1 0
15 1.200625 0.984580 -0.234312 0.348855 0.175663 0.309396 0.390611 -0.745912 -0.667554 -0.052439 0.119610 -0.862930 0.945979 0 0
16 0.435253 3.280178 0.407736 1.143148 2.291571 0.546530 0.170667 0.427708 -0.063936 -0.532360 0.404150 0.415849 0.869331 0 0
17 -0.398944 0.035026 -1.634042 -1.354378 0.854385 1.406182 -0.773335 0.663902 0.928496 1.278830 0.464511 0.235475 -0.040374 2 0
18 -0.454008 -0.234096 -0.930672 -0.507506 0.545773 0.437756 1.026910 0.013959 -0.620099 -0.593763 1.073690 0.594340 0.987056 0 0
19 0.149846 0.062252 -0.002122 0.786346 0.810930 0.304880 -0.882886 -0.043156 2.503584 0.894947 0.394981 0.761651 0.402963 0 0
20 -0.314274 0.446482 0.889744 0.891114 1.249237 0.718469 0.296834 -0.831548 -0.393364 -0.103574 0.295790 0.092061 0.424633 0 0
21 0.659365 1.053258 -0.877939 -0.295954 -1.122110 -0.035202 1.512616 0.031457 -0.700740 -1.687204 -1.136215 -1.545451 -0.082548 1 0
22 0.568507 -0.357318 -1.183577 -0.069205 0.462644 -0.956011 0.501504 0.240708 -0.025482 0.416003 0.237690 -0.566935 -0.846151 1 0
23 0.696474 0.477607 -1.637469 -1.158983 -2.224208 -1.861929 -0.176558 0.694585 0.426826 -0.088376 -0.335290 1.125320 0.705700 2 0
24 -0.221795 -0.513464 -0.506448 0.594506 0.033232 -1.141879 -1.582503 -0.081204 -0.001962 -0.704687 -0.473528 0.580117 1.533686 2 0
25 0.036099 -0.007586 0.116729 0.438081 -1.526141 -1.994283 -1.014100 0.028630 -0.553238 -0.540795 0.467730 0.943285 0.498193 2 0
26 -0.291576 -0.372192 -1.176599 0.078535 0.516288 -1.851892 -2.218803 0.335200 0.323222 0.006649 0.017717 0.133172 1.208725 2 0
27 0.953536 0.427304 -0.554063 0.425439 1.368674 0.362392 0.477030 -0.976616 -0.382390 0.310619 -0.903078 -0.943886 -0.047616 0 0
28 -1.172014 1.307258 -1.059323 -0.655908 1.591107 0.483432 0.474862 0.348014 -0.527448 0.798802 -0.075253 1.943808 0.108268 0 0
29 -0.954427 0.000731 -0.367958 0.281024 0.303337 0.744504 1.271647 0.298340 -0.057042 -0.297712 -0.053703 -0.045043 -0.561554 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
156 0.232363 -1.167339 -0.114632 1.240724 -0.209611 0.597503 -0.105216 -0.393018 -0.168804 -0.038601 0.602075 -0.482733 0.192333 0 1
157 -1.686193 -0.806140 -0.531342 -0.411912 0.312945 0.751058 0.624837 -0.394463 0.549120 -1.174079 -1.374572 -1.950144 -0.652535 1 1
158 0.487798 1.116042 -0.308817 0.175231 -0.191701 -0.682970 0.502123 0.749073 1.365476 0.198244 1.283992 0.132188 0.482532 0 1
159 1.049575 0.742765 0.000505 0.670386 0.235663 -0.297404 0.891743 0.047729 0.086633 0.873400 0.552393 0.496793 0.659122 0 1
160 0.285967 0.602916 -0.009050 0.802464 0.333031 -1.182611 0.473870 0.896236 0.890391 0.208214 0.786475 0.044481 -0.114927 0 1
161 2.568510 -0.180837 0.794882 1.410838 0.898076 0.468184 0.963255 0.338074 2.081580 2.353196 0.146660 -0.295606 -0.020484 0 1
162 0.821849 0.906757 0.282262 0.304716 -0.691824 0.772704 2.543328 -0.404440 1.861464 1.635426 0.204673 0.084333 0.469447 0 1
163 2.581037 0.239015 1.212048 0.498566 0.095720 0.062469 3.463238 0.374969 -0.054235 -0.365031 -0.169020 1.160964 0.666076 0 1
164 -0.247271 -0.874145 -0.840584 0.233138 0.034101 0.259892 0.144353 -0.570094 1.244117 0.282845 0.127444 -0.721587 -1.450860 1 1
165 0.188979 -0.519200 0.108496 -0.513645 -0.637646 0.812515 0.626360 -0.156977 -0.092241 -0.517923 0.026563 -0.597616 -0.101096 1 1
166 -0.015438 -0.656621 -0.739614 0.302131 0.583862 0.465267 0.342075 -0.318902 0.221544 0.654368 0.777463 -0.462212 -0.867288 1 1
167 -1.567081 -1.052883 -0.417918 0.636963 -0.531279 0.787238 -1.913461 -0.020653 -0.111129 0.112259 -0.380422 0.497894 0.709826 2 1
168 -1.883530 -0.172892 -0.340073 -0.255266 -0.480237 -0.061425 -0.158589 -0.308725 -0.034923 0.150845 0.696367 0.704196 0.473391 2 1
169 -1.577057 -0.602693 0.448785 1.073850 -0.714538 1.427240 -1.645225 0.812069 -0.019466 -0.719024 -0.991241 0.521497 0.461555 2 1
170 0.667824 -0.298287 -0.412356 -1.154598 0.171532 -0.341146 -0.411827 -1.296671 0.428160 -0.233124 0.035111 -0.934501 0.123701 1 1
171 0.626343 -0.046406 -0.168999 -1.278941 0.502027 -0.060296 0.062571 -1.284727 0.028380 -0.488598 -0.255008 -1.199407 0.088115 1 1
172 -0.441233 -0.987406 0.015664 -2.982951 -0.379800 1.485748 -2.174788 -3.521120 -0.191502 2.486906 2.771782 0.789523 0.553119 2 1
173 0.331514 0.334707 -0.187508 0.489055 1.133140 1.016598 -0.572753 -0.634721 -0.567790 -0.492410 0.634219 -0.122575 -0.205540 0 1
174 0.480131 0.345645 0.202709 -0.423456 1.184414 2.116965 -0.463050 0.212550 1.973473 -0.996794 0.924229 -0.170049 -0.068464 0 1
175 1.039410 -0.773764 0.113739 -0.796036 -1.053802 -1.238009 0.153897 0.497600 1.347261 0.972165 0.993095 0.066125 -0.069772 2 1
176 0.194148 -0.229033 -0.571129 -0.704359 -0.204400 -0.273049 1.105329 0.052851 -0.360196 0.099095 1.063628 -0.266594 0.041526 1 1
177 -0.169188 0.025195 -0.189648 0.376353 0.802036 -1.185140 0.488985 0.244963 1.305631 0.241661 0.402415 -0.494815 0.252206 0 1
178 1.433007 0.217051 -0.388425 -1.158798 2.068592 1.278810 -1.193547 -0.909321 -0.207122 2.062093 1.374797 0.383804 1.569650 0 1
179 1.371536 0.624596 -0.082552 0.444824 1.959112 -0.736647 -1.448177 0.624897 1.304939 -0.025270 -0.519401 0.592135 1.312240 0 1
180 0.815061 -1.210119 0.844643 -1.152602 -0.216878 -1.573232 -0.065062 2.136014 -0.285964 1.827988 -0.982121 1.139199 0.936226 2 1
181 0.814962 -1.028970 -1.340094 -1.579784 0.774822 -0.351654 -2.148181 2.772395 1.638263 -0.394371 1.796246 1.182459 0.824064 2 1
182 1.615277 0.706391 -0.611277 0.513438 0.987249 1.226124 0.240966 0.485917 1.355615 -0.480955 -0.255325 -0.370864 0.107591 0 1
183 0.290224 0.578762 0.024629 0.119894 0.626180 1.025427 0.180541 -0.504388 -1.085411 -1.413825 0.811722 0.640653 0.433677 0 1
184 0.086408 -1.394139 -0.501233 1.251905 -0.481983 0.026482 -1.317983 -0.580623 -0.160381 -0.718194 0.110108 -0.183905 0.074891 2 1
185 0.024909 -0.713904 -1.235134 -0.194562 0.155358 -0.586587 -0.455970 0.577457 1.172268 0.468799 0.500130 1.133624 0.192845 2 1

186 rows × 15 columns

In [129]:
# Count tracks per (chosen, cluster) pair and show, for each cluster, how many
# chosen vs. not-chosen tracks it contains, as stacked bars.
stacked = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = stacked.set_index(['Cluster', 'chosen'])[0].unstack('chosen')
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[129]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82cc917b8>
In [130]:
from IPython.display import display, Markdown, Latex

# Render the current company's name as a level-2 markdown heading.
heading = Markdown('## ' + companies[4])
display(heading)

Specialized

ANN

In [131]:
X = df_n_ps_std_mfcc[4].drop(columns='Cluster')
In [132]:
y = df_n_ps[4]['chosen']
In [133]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [134]:
X_train.shape
Out[134]:
(164, 13)
In [135]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [136]:
# Hyper-parameter grids explored by the MLP search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# 1-, 2- and 3-layer topologies with 10/20/30 units per layer.
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),
    (10, 10), (20, 20), (30, 30), (20, 10),
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),
]
# 0.001 .. 0.009 in steps of 0.001, then 0.01 and 0.02.
learning_rate_init_vec = [i / 1000 for i in range(1, 10)] + [0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [137]:
import time
start = time.time()  # wall-clock reference to time the whole grid search

np.random.seed(1234)  # seed the global RNG used by estimators with random_state=None
parametros = {'activation': activation_vec,
              'max_iter': max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track Cohen's kappa alongside accuracy; the best model is refit on accuracy.
scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
# NOTE(review): the deprecated `iid` argument was removed in scikit-learn 0.24,
# so it is dropped here to keep the cell runnable on current versions. Fold
# scores are then averaged unweighted, which only differs from iid=True when
# CV folds have unequal sizes.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [138]:
grid.fit(X_train, y_train)

# Report the winning configuration and its cross-validated scores (in %).
best_accuracy = grid.best_score_ * 100
best_kappa = grid.cv_results_['mean_test_kappa'][grid.best_index_] * 100
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, best_accuracy, best_kappa))
end = time.time()  # timestamp right after training finishes
print("Tiempo total: {0:.2f} minutos".format((end - start) / 60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20, 20, 20), 'learning_rate_init': 0.003, 'max_iter': 400}, que permiten obtener un Accuracy de 72.56% y un Kappa del 45.12
Tiempo total: 20.25 minutos
In [139]:
# Pull the winning sklearn hyper-parameters so the same network can be
# rebuilt in Keras below.
n0 = X_train.shape[1]  # input dimensionality
### hidden_layer_sizes
best = grid.best_params_
ns = list(best['hidden_layer_sizes'])  # hidden layer widths

ns.append(1)  # plus a single output unit for binary classification
lr = best['learning_rate_init']
epochs = best['max_iter']
In [140]:
input_tensor = Input(shape = (n0,))
In [141]:
# Stack the hidden layers found by the grid search. Use the activation the
# search actually selected instead of hard-coding 'tanh' (the reported best
# params chose 'relu'); sklearn's 'logistic' is called 'sigmoid' in Keras.
best_activation = grid.best_params_.get('activation', 'tanh')
if best_activation == 'logistic':
    best_activation = 'sigmoid'

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=best_activation)(hidden_outputs[i]))

# Final single-unit sigmoid layer for binary classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [142]:
# Assemble the functional model and snapshot its freshly-initialised weights
# so training can later be restarted from the same starting point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [143]:
model.summary()
Model: "model_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_5 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_14 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_15 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_16 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_17 (Dense)             (None, 1)                 21        
=================================================================
Total params: 1,141
Trainable params: 1,141
Non-trainable params: 0
_________________________________________________________________
In [144]:
# Restore the initial random weights so re-running this cell trains from the
# same starting point every time.
model.set_weights(weights)
# `lr` is a deprecated alias; Keras >= 2.3 (which this notebook runs, given the
# 'val_accuracy' metric name in its logs) takes `learning_rate`.
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the epoch budget from the sklearn search; halve the learning rate
# whenever validation accuracy fails to improve by 0.01 for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 164 samples, validate on 55 samples
Epoch 1/400
164/164 [==============================] - 0s 1ms/step - loss: 0.7149 - accuracy: 0.5244 - val_loss: 0.7183 - val_accuracy: 0.5273
Epoch 2/400
164/164 [==============================] - 0s 67us/step - loss: 0.6628 - accuracy: 0.5854 - val_loss: 0.7022 - val_accuracy: 0.6182
Epoch 3/400
164/164 [==============================] - 0s 73us/step - loss: 0.6490 - accuracy: 0.6707 - val_loss: 0.6958 - val_accuracy: 0.6364
Epoch 4/400
164/164 [==============================] - 0s 67us/step - loss: 0.6445 - accuracy: 0.6707 - val_loss: 0.6991 - val_accuracy: 0.6182
Epoch 5/400
164/164 [==============================] - 0s 73us/step - loss: 0.6399 - accuracy: 0.6646 - val_loss: 0.6941 - val_accuracy: 0.6182
Epoch 6/400
164/164 [==============================] - 0s 73us/step - loss: 0.6294 - accuracy: 0.6951 - val_loss: 0.6918 - val_accuracy: 0.6182
Epoch 7/400
164/164 [==============================] - 0s 104us/step - loss: 0.6196 - accuracy: 0.6951 - val_loss: 0.6923 - val_accuracy: 0.6182
Epoch 8/400
164/164 [==============================] - 0s 73us/step - loss: 0.6167 - accuracy: 0.7134 - val_loss: 0.6890 - val_accuracy: 0.6000
Epoch 9/400
164/164 [==============================] - 0s 67us/step - loss: 0.6139 - accuracy: 0.6829 - val_loss: 0.6929 - val_accuracy: 0.6000
Epoch 10/400
164/164 [==============================] - 0s 73us/step - loss: 0.6108 - accuracy: 0.6890 - val_loss: 0.6929 - val_accuracy: 0.6182
Epoch 11/400
164/164 [==============================] - 0s 79us/step - loss: 0.6044 - accuracy: 0.7012 - val_loss: 0.6875 - val_accuracy: 0.6000
Epoch 12/400
164/164 [==============================] - 0s 79us/step - loss: 0.5918 - accuracy: 0.7073 - val_loss: 0.6807 - val_accuracy: 0.6545
Epoch 13/400
164/164 [==============================] - 0s 67us/step - loss: 0.5862 - accuracy: 0.7073 - val_loss: 0.6789 - val_accuracy: 0.6545
Epoch 14/400
164/164 [==============================] - 0s 110us/step - loss: 0.5762 - accuracy: 0.6890 - val_loss: 0.6712 - val_accuracy: 0.6545
Epoch 15/400
164/164 [==============================] - 0s 73us/step - loss: 0.5665 - accuracy: 0.7073 - val_loss: 0.6710 - val_accuracy: 0.6727
Epoch 16/400
164/164 [==============================] - 0s 73us/step - loss: 0.5596 - accuracy: 0.7134 - val_loss: 0.6779 - val_accuracy: 0.6727
Epoch 17/400
164/164 [==============================] - 0s 73us/step - loss: 0.5537 - accuracy: 0.7012 - val_loss: 0.6888 - val_accuracy: 0.6727
Epoch 18/400
164/164 [==============================] - 0s 73us/step - loss: 0.5451 - accuracy: 0.7134 - val_loss: 0.6966 - val_accuracy: 0.6364
Epoch 19/400
164/164 [==============================] - 0s 79us/step - loss: 0.5349 - accuracy: 0.7256 - val_loss: 0.7024 - val_accuracy: 0.5818
Epoch 20/400
164/164 [==============================] - 0s 79us/step - loss: 0.5252 - accuracy: 0.7378 - val_loss: 0.7101 - val_accuracy: 0.6364
Epoch 21/400
164/164 [==============================] - 0s 104us/step - loss: 0.5172 - accuracy: 0.7256 - val_loss: 0.7167 - val_accuracy: 0.6182
Epoch 22/400
164/164 [==============================] - 0s 79us/step - loss: 0.5140 - accuracy: 0.7317 - val_loss: 0.7129 - val_accuracy: 0.6000
Epoch 23/400
164/164 [==============================] - 0s 73us/step - loss: 0.5070 - accuracy: 0.7378 - val_loss: 0.6954 - val_accuracy: 0.6000
Epoch 24/400
164/164 [==============================] - 0s 73us/step - loss: 0.4857 - accuracy: 0.7744 - val_loss: 0.6973 - val_accuracy: 0.5818
Epoch 25/400
164/164 [==============================] - 0s 79us/step - loss: 0.4784 - accuracy: 0.7927 - val_loss: 0.6920 - val_accuracy: 0.6182

Epoch 00025: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 26/400
164/164 [==============================] - 0s 73us/step - loss: 0.4691 - accuracy: 0.8110 - val_loss: 0.6896 - val_accuracy: 0.6364
Epoch 27/400
164/164 [==============================] - 0s 85us/step - loss: 0.4629 - accuracy: 0.8110 - val_loss: 0.6919 - val_accuracy: 0.6182
Epoch 28/400
164/164 [==============================] - 0s 85us/step - loss: 0.4541 - accuracy: 0.8110 - val_loss: 0.6785 - val_accuracy: 0.6364
Epoch 29/400
164/164 [==============================] - 0s 73us/step - loss: 0.4506 - accuracy: 0.8049 - val_loss: 0.6732 - val_accuracy: 0.6364
Epoch 30/400
164/164 [==============================] - 0s 79us/step - loss: 0.4403 - accuracy: 0.8171 - val_loss: 0.6752 - val_accuracy: 0.6182
Epoch 31/400
164/164 [==============================] - 0s 110us/step - loss: 0.4384 - accuracy: 0.8476 - val_loss: 0.6800 - val_accuracy: 0.6182
Epoch 32/400
164/164 [==============================] - 0s 79us/step - loss: 0.4345 - accuracy: 0.8537 - val_loss: 0.6872 - val_accuracy: 0.6182
Epoch 33/400
164/164 [==============================] - 0s 98us/step - loss: 0.4265 - accuracy: 0.8598 - val_loss: 0.6875 - val_accuracy: 0.6545
Epoch 34/400
164/164 [==============================] - 0s 79us/step - loss: 0.4183 - accuracy: 0.8476 - val_loss: 0.6883 - val_accuracy: 0.6727
Epoch 35/400
164/164 [==============================] - 0s 67us/step - loss: 0.4100 - accuracy: 0.8415 - val_loss: 0.6959 - val_accuracy: 0.6727

Epoch 00035: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 36/400
164/164 [==============================] - 0s 67us/step - loss: 0.4034 - accuracy: 0.8293 - val_loss: 0.6946 - val_accuracy: 0.7091
Epoch 37/400
164/164 [==============================] - 0s 67us/step - loss: 0.3991 - accuracy: 0.8293 - val_loss: 0.6968 - val_accuracy: 0.6909
Epoch 38/400
164/164 [==============================] - 0s 85us/step - loss: 0.3958 - accuracy: 0.8293 - val_loss: 0.6987 - val_accuracy: 0.6909
Epoch 39/400
164/164 [==============================] - 0s 97us/step - loss: 0.3918 - accuracy: 0.8354 - val_loss: 0.7003 - val_accuracy: 0.6909
Epoch 40/400
164/164 [==============================] - 0s 79us/step - loss: 0.3892 - accuracy: 0.8354 - val_loss: 0.7088 - val_accuracy: 0.6545
Epoch 41/400
164/164 [==============================] - 0s 73us/step - loss: 0.3852 - accuracy: 0.8354 - val_loss: 0.7138 - val_accuracy: 0.6545
Epoch 42/400
164/164 [==============================] - 0s 79us/step - loss: 0.3819 - accuracy: 0.8476 - val_loss: 0.7122 - val_accuracy: 0.6909
Epoch 43/400
164/164 [==============================] - 0s 79us/step - loss: 0.3787 - accuracy: 0.8598 - val_loss: 0.7089 - val_accuracy: 0.6909
Epoch 44/400
164/164 [==============================] - 0s 73us/step - loss: 0.3763 - accuracy: 0.8598 - val_loss: 0.7081 - val_accuracy: 0.6909
Epoch 45/400
164/164 [==============================] - 0s 73us/step - loss: 0.3726 - accuracy: 0.8598 - val_loss: 0.7091 - val_accuracy: 0.6727
Epoch 46/400
164/164 [==============================] - 0s 73us/step - loss: 0.3678 - accuracy: 0.8598 - val_loss: 0.7040 - val_accuracy: 0.6727

Epoch 00046: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 47/400
164/164 [==============================] - 0s 98us/step - loss: 0.3655 - accuracy: 0.8598 - val_loss: 0.7071 - val_accuracy: 0.6727
Epoch 48/400
164/164 [==============================] - 0s 85us/step - loss: 0.3630 - accuracy: 0.8598 - val_loss: 0.7106 - val_accuracy: 0.6545
Epoch 49/400
164/164 [==============================] - 0s 73us/step - loss: 0.3608 - accuracy: 0.8598 - val_loss: 0.7129 - val_accuracy: 0.6545
Epoch 50/400
164/164 [==============================] - 0s 79us/step - loss: 0.3591 - accuracy: 0.8659 - val_loss: 0.7160 - val_accuracy: 0.6364
Epoch 51/400
164/164 [==============================] - 0s 73us/step - loss: 0.3573 - accuracy: 0.8659 - val_loss: 0.7198 - val_accuracy: 0.6364
Epoch 52/400
164/164 [==============================] - 0s 79us/step - loss: 0.3555 - accuracy: 0.8659 - val_loss: 0.7224 - val_accuracy: 0.6545
Epoch 53/400
164/164 [==============================] - 0s 73us/step - loss: 0.3539 - accuracy: 0.8659 - val_loss: 0.7248 - val_accuracy: 0.6364
Epoch 54/400
164/164 [==============================] - 0s 85us/step - loss: 0.3524 - accuracy: 0.8659 - val_loss: 0.7281 - val_accuracy: 0.6364
Epoch 55/400
164/164 [==============================] - 0s 98us/step - loss: 0.3514 - accuracy: 0.8902 - val_loss: 0.7324 - val_accuracy: 0.6364
Epoch 56/400
164/164 [==============================] - 0s 79us/step - loss: 0.3504 - accuracy: 0.8841 - val_loss: 0.7356 - val_accuracy: 0.6364

Epoch 00056: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 57/400
164/164 [==============================] - 0s 73us/step - loss: 0.3483 - accuracy: 0.8902 - val_loss: 0.7344 - val_accuracy: 0.6364
Epoch 58/400
164/164 [==============================] - 0s 79us/step - loss: 0.3472 - accuracy: 0.8902 - val_loss: 0.7329 - val_accuracy: 0.6364
Epoch 59/400
164/164 [==============================] - 0s 79us/step - loss: 0.3463 - accuracy: 0.8963 - val_loss: 0.7335 - val_accuracy: 0.6364
Epoch 60/400
164/164 [==============================] - 0s 73us/step - loss: 0.3449 - accuracy: 0.8963 - val_loss: 0.7348 - val_accuracy: 0.6364
Epoch 61/400
164/164 [==============================] - 0s 73us/step - loss: 0.3439 - accuracy: 0.8963 - val_loss: 0.7356 - val_accuracy: 0.6364
Epoch 62/400
164/164 [==============================] - 0s 73us/step - loss: 0.3427 - accuracy: 0.8902 - val_loss: 0.7366 - val_accuracy: 0.6364
Epoch 63/400
164/164 [==============================] - 0s 98us/step - loss: 0.3420 - accuracy: 0.8902 - val_loss: 0.7351 - val_accuracy: 0.6364
Epoch 64/400
164/164 [==============================] - 0s 79us/step - loss: 0.3409 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 65/400
164/164 [==============================] - 0s 73us/step - loss: 0.3398 - accuracy: 0.8780 - val_loss: 0.7364 - val_accuracy: 0.6364
Epoch 66/400
164/164 [==============================] - 0s 73us/step - loss: 0.3389 - accuracy: 0.8780 - val_loss: 0.7359 - val_accuracy: 0.6545

Epoch 00066: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 67/400
164/164 [==============================] - 0s 128us/step - loss: 0.3380 - accuracy: 0.8841 - val_loss: 0.7359 - val_accuracy: 0.6545
Epoch 68/400
164/164 [==============================] - 0s 79us/step - loss: 0.3373 - accuracy: 0.8841 - val_loss: 0.7361 - val_accuracy: 0.6364
Epoch 69/400
164/164 [==============================] - 0s 79us/step - loss: 0.3372 - accuracy: 0.8902 - val_loss: 0.7360 - val_accuracy: 0.6364
Epoch 70/400
164/164 [==============================] - 0s 73us/step - loss: 0.3364 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 71/400
164/164 [==============================] - 0s 79us/step - loss: 0.3359 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 72/400
164/164 [==============================] - 0s 91us/step - loss: 0.3356 - accuracy: 0.8841 - val_loss: 0.7359 - val_accuracy: 0.6545
Epoch 73/400
164/164 [==============================] - 0s 85us/step - loss: 0.3349 - accuracy: 0.8841 - val_loss: 0.7369 - val_accuracy: 0.6545
Epoch 74/400
164/164 [==============================] - 0s 67us/step - loss: 0.3343 - accuracy: 0.8841 - val_loss: 0.7377 - val_accuracy: 0.6545
Epoch 75/400
164/164 [==============================] - 0s 85us/step - loss: 0.3341 - accuracy: 0.8780 - val_loss: 0.7388 - val_accuracy: 0.6364
Epoch 76/400
164/164 [==============================] - 0s 85us/step - loss: 0.3336 - accuracy: 0.8780 - val_loss: 0.7396 - val_accuracy: 0.6364

Epoch 00076: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 77/400
164/164 [==============================] - 0s 73us/step - loss: 0.3332 - accuracy: 0.8841 - val_loss: 0.7399 - val_accuracy: 0.6364
Epoch 78/400
164/164 [==============================] - 0s 73us/step - loss: 0.3331 - accuracy: 0.8841 - val_loss: 0.7402 - val_accuracy: 0.6364
Epoch 79/400
164/164 [==============================] - 0s 73us/step - loss: 0.3328 - accuracy: 0.8841 - val_loss: 0.7402 - val_accuracy: 0.6364
Epoch 80/400
164/164 [==============================] - 0s 73us/step - loss: 0.3326 - accuracy: 0.8841 - val_loss: 0.7405 - val_accuracy: 0.6364
Epoch 81/400
164/164 [==============================] - 0s 73us/step - loss: 0.3324 - accuracy: 0.8841 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 82/400
164/164 [==============================] - 0s 79us/step - loss: 0.3322 - accuracy: 0.8841 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 83/400
164/164 [==============================] - 0s 79us/step - loss: 0.3319 - accuracy: 0.8902 - val_loss: 0.7408 - val_accuracy: 0.6364
Epoch 84/400
164/164 [==============================] - 0s 91us/step - loss: 0.3317 - accuracy: 0.8902 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 85/400
164/164 [==============================] - 0s 79us/step - loss: 0.3315 - accuracy: 0.8902 - val_loss: 0.7411 - val_accuracy: 0.6364
Epoch 86/400
164/164 [==============================] - 0s 73us/step - loss: 0.3312 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364

Epoch 00086: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 87/400
164/164 [==============================] - 0s 79us/step - loss: 0.3310 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 88/400
164/164 [==============================] - 0s 104us/step - loss: 0.3309 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 89/400
164/164 [==============================] - 0s 104us/step - loss: 0.3307 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 90/400
164/164 [==============================] - 0s 91us/step - loss: 0.3306 - accuracy: 0.8902 - val_loss: 0.7413 - val_accuracy: 0.6364
Epoch 91/400
164/164 [==============================] - 0s 98us/step - loss: 0.3305 - accuracy: 0.8902 - val_loss: 0.7415 - val_accuracy: 0.6364
Epoch 92/400
164/164 [==============================] - 0s 73us/step - loss: 0.3305 - accuracy: 0.8963 - val_loss: 0.7418 - val_accuracy: 0.6364
Epoch 93/400
164/164 [==============================] - 0s 79us/step - loss: 0.3304 - accuracy: 0.8963 - val_loss: 0.7419 - val_accuracy: 0.6364
Epoch 94/400
164/164 [==============================] - 0s 79us/step - loss: 0.3302 - accuracy: 0.8963 - val_loss: 0.7420 - val_accuracy: 0.6364
Epoch 95/400
164/164 [==============================] - 0s 91us/step - loss: 0.3301 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6364
Epoch 96/400
164/164 [==============================] - 0s 79us/step - loss: 0.3300 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545

Epoch 00096: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 97/400
164/164 [==============================] - 0s 79us/step - loss: 0.3299 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 98/400
164/164 [==============================] - 0s 79us/step - loss: 0.3298 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 99/400
164/164 [==============================] - 0s 91us/step - loss: 0.3298 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 100/400
164/164 [==============================] - 0s 79us/step - loss: 0.3297 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 101/400
164/164 [==============================] - 0s 79us/step - loss: 0.3297 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 102/400
164/164 [==============================] - 0s 79us/step - loss: 0.3296 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 103/400
164/164 [==============================] - 0s 73us/step - loss: 0.3296 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 104/400
164/164 [==============================] - 0s 97us/step - loss: 0.3295 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 105/400
164/164 [==============================] - 0s 79us/step - loss: 0.3295 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 106/400
164/164 [==============================] - 0s 67us/step - loss: 0.3294 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545

Epoch 00106: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 107/400
164/164 [==============================] - 0s 91us/step - loss: 0.3294 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 108/400
164/164 [==============================] - 0s 85us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 109/400
164/164 [==============================] - 0s 79us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 110/400
164/164 [==============================] - 0s 79us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 111/400
164/164 [==============================] - 0s 91us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 112/400
164/164 [==============================] - 0s 79us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 113/400
164/164 [==============================] - 0s 79us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 114/400
164/164 [==============================] - 0s 85us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 115/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 116/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545

Epoch 00116: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 117/400
164/164 [==============================] - 0s 73us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 118/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 119/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 120/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 121/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 122/400
164/164 [==============================] - 0s 98us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 123/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 124/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 125/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 126/400
164/164 [==============================] - 0s 104us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545

Epoch 00126: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 127/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 128/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 129/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 130/400
164/164 [==============================] - 0s 134us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 131/400
164/164 [==============================] - 0s 98us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 132/400
164/164 [==============================] - 0s 85us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 133/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 134/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 135/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 136/400
164/164 [==============================] - 0s 104us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00136: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 137/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 138/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 139/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 140/400
164/164 [==============================] - 0s 116us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 141/400
164/164 [==============================] - 0s 91us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 142/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 143/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 144/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 145/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 146/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00146: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 147/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 148/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 149/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 150/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 151/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 152/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 153/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 154/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 155/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 156/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00156: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 157/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 158/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 159/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 160/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 161/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 162/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 163/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 164/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 165/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 166/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00166: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 167/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 168/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 169/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 170/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 171/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 172/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 173/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 174/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 175/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 176/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00176: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 177/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 178/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 179/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 180/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 181/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 182/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 183/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 184/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 185/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 186/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00186: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 187/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 188/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 189/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 190/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 191/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 192/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 193/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 194/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 195/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 196/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00196: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 197/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 198/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 199/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 200/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 201/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 202/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 203/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 204/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 205/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 206/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00206: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 207/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 208/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 209/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 210/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 211/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 212/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 213/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 214/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 215/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 216/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00216: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 217/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 218/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 219/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 220/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 221/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 222/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 223/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 224/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 225/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 226/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00226: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 227/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 228/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 229/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 230/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 231/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 232/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 233/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 234/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 235/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 236/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00236: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 237/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 238/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 239/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 240/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 241/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 242/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 243/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 244/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 245/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 246/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00246: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 247/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 248/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 249/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 250/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 251/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 252/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 253/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 254/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 255/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 256/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00256: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 257/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 258/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 259/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 260/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 261/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 262/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 263/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 264/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 265/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 266/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00266: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 267/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 268/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 269/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 270/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 271/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 272/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 273/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 274/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 275/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 276/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00276: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 277/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 278/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 279/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 280/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 281/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 282/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 283/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 284/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 285/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 286/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00286: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 287/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 288/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 289/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 290/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 291/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 292/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 293/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 294/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 295/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 296/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00296: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 297/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 298/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 299/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 300/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 301/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 302/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 303/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 304/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 305/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 306/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00306: ReduceLROnPlateau reducing learning rate to 5.5879354962651284e-12.
Epoch 307/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 308/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 309/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 310/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 311/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 312/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 313/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 314/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 315/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 316/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00316: ReduceLROnPlateau reducing learning rate to 2.7939677481325642e-12.
Epoch 317/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 318/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 319/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 320/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 321/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 322/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 323/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 324/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 325/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 326/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00326: ReduceLROnPlateau reducing learning rate to 1.3969838740662821e-12.
Epoch 327/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 328/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 329/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 330/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 331/400
164/164 [==============================] - ETA: 0s - loss: 0.3229 - accuracy: 0.90 - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 332/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 333/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 334/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 335/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 336/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00336: ReduceLROnPlateau reducing learning rate to 6.984919370331411e-13.
Epoch 337/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 338/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 339/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 340/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 341/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 342/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 343/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 344/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 345/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 346/400
164/164 [==============================] - 0s 97us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00346: ReduceLROnPlateau reducing learning rate to 3.4924596851657053e-13.
Epoch 347/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 348/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 349/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 350/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 351/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 352/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 353/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 354/400
164/164 [==============================] - 0s 158us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 355/400
164/164 [==============================] - 0s 140us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 356/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00356: ReduceLROnPlateau reducing learning rate to 1.7462298425828526e-13.
Epoch 357/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 358/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 359/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 360/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 361/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 362/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 363/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 364/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 365/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 366/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00366: ReduceLROnPlateau reducing learning rate to 8.731149212914263e-14.
Epoch 367/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 368/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 369/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 370/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 371/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 372/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 373/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 374/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 375/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 376/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00376: ReduceLROnPlateau reducing learning rate to 4.3655746064571316e-14.
Epoch 377/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 378/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 379/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 380/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 381/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 382/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 383/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 384/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 385/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 386/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00386: ReduceLROnPlateau reducing learning rate to 2.1827873032285658e-14.
Epoch 387/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 388/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 389/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 390/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 391/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 392/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 393/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 394/400
164/164 [==============================] - 0s 140us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 395/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 396/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00396: ReduceLROnPlateau reducing learning rate to 1.0913936516142829e-14.
Epoch 397/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 398/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 399/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 400/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
In [145]:
# Plot the Keras training curves (accuracy and loss, train vs. validation)
# from the History object returned by model.fit.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))  # one point per completed epoch

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 400)
In [146]:
# Final hold-out evaluation of the trained network.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
55/55 [==============================] - 0s 55us/step
test loss: 0.7425676215778697, test accuracy: 0.6545454263687134
In [147]:
# Score the predicted class-1 probabilities on the test set with ROC AUC.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6399456521739131
In [148]:
# Binarise the probabilities at the 0.5 threshold and report Cohen's kappa.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.2943956785955435

KMeans

In [149]:
X
Out[149]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812
... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010

219 rows × 13 columns

In [150]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep is one expression.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[150]:
[2847.0,
 2572.5760570812117,
 2370.209947155015,
 2235.6129406180157,
 2112.951551625758,
 2041.1809211260454,
 1982.3615393500422,
 1899.0667595696164,
 1851.9267246215204,
 1760.4468946465518,
 1745.79714786859,
 1689.1350809615656,
 1657.4940102564742,
 1625.370413913055]
In [151]:
# Elbow plot: look for the "knee" in WSS vs. number of clusters
# (the curve below flattens around k = 3, chosen in the next cell).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[151]:
[<matplotlib.lines.Line2D at 0x1e82d2ffda0>]

K=3

In [152]:
# Fit the final 3-cluster model (K chosen from the elbow plot above).
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10).fit(X)
kmeans_mfcc
Out[152]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [153]:
kmeans_mfcc.labels_
Out[153]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [154]:
# Cluster assignment for every row of X. Predicting on the training data
# itself, so this matches kmeans_mfcc.labels_ (compare Out[153] / Out[154]).
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[154]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [155]:
# Attach the cluster id and the target label to X for cross-tabulation.
# NOTE(review): this mutates X in place — any later cell that reuses X as a
# feature matrix will now see the extra 'Cluster'/'chosen' columns
# (Out[156] shows 15 columns). A copy would be safer.
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [156]:
X
Out[156]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392 1 0
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458 2 0
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846 1 0
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081 1 0
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578 0 0
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711 0 0
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956 0 0
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966 1 0
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166 0 0
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942 0 0
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713 0 0
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727 0 0
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316 0 0
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062 0 0
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157 0 0
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920 0 0
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932 0 0
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350 0 0
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315 1 0
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227 2 0
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971 0 0
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975 2 0
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688 1 0
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948 2 0
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393 1 0
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662 0 0
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212 0 0
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400 0 0
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702 0 0
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507 0 1
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752 1 1
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524 2 1
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026 1 1
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252 1 1
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356 1 1
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197 2 1
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262 1 1
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841 1 1
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548 1 1
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516 1 1
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480 1 1
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728 1 1
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491 2 1
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982 2 1
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207 2 1
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325 1 1
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507 2 1
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993 0 1
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011 1 1
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564 1 1
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298 1 1
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869 2 1
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017 2 1
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484 2 1
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139 0 1
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225 0 1
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652 1 1
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456 0 1
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010 1 1

219 rows × 15 columns

In [157]:
# Contingency of cluster id vs. 'chosen' flag, drawn as a stacked bar chart.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
by_cluster = counts.pivot(index='Cluster', columns='chosen', values=0)
by_cluster.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[157]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82d379160>
In [158]:
# Render the current company's name as a level-2 markdown heading
# (here index 5 — rendered below as "Urban Place").
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [159]:
# Standardised MFCC features for playlist 5, minus the 'Cluster' column
# added during an earlier clustering step.
X = df_n_ps_std_mfcc[5].drop(columns='Cluster')
In [160]:
# Binary target: the 'chosen' flag for the same playlist.
y = df_n_ps[5]['chosen']
In [161]:
# Default 75/25 split. NOTE(review): no random_state is set, so this split
# is not reproducible across kernel restarts.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [162]:
X_train.shape  # sanity check: (rows, features) — Out[162] shows (168, 13)
Out[162]:
(168, 13)
In [163]:
# Base estimator for the grid search; hidden_layer_sizes set here is
# overridden by the 'hidden_layer_sizes' entry of the parameter grid.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [164]:
# Candidate grids for the MLP hyper-parameter search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),                                      # one hidden layer
    (10, 10), (20, 20), (30, 30), (20, 10),                   # two hidden layers
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),   # three hidden layers
]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005,
                          0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [165]:
import time
start = time.time() # current time in seconds since the Unix epoch (1 Jan 1970), used to time the search

np.random.seed(1234)
# Search space; 'batch_size' is left commented out to keep the grid tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
# drop the argument when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [166]:
grid.fit(X_train, y_train)

# Printed message (Spanish): "the best model's parameters were {0}, giving an
# Accuracy of {1}% and a Kappa of {2}".
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time right after the grid search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.009, 'max_iter': 1000}, que permiten obtener un Accuracy de 73.81% y un Kappa del 36.33
Tiempo total: 24.57 minutos
In [167]:
n0 = X_train.shape[1]  # number of input features
# Layer widths for the Keras re-implementation: the hidden-layer sizes of the
# best grid-search model, followed by the single sigmoid output unit.
# (list(...) replaces the original element-by-element index loop.)
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
# Reuse the tuned learning rate and epoch budget from the search.
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [168]:
# Functional-API input layer sized to the feature count (n0 = 13 here).
input_tensor = Input(shape = (n0,))
In [169]:
# Stack the hidden Dense layers functionally; each layer consumes the output
# of the previous one (starting from the input tensor).
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation='tanh')(hidden_outputs[-1]))

# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [170]:
# Build the model and snapshot its freshly-initialised weights so training
# can later be restarted from the same starting point (see the set_weights
# call in a later cell).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [171]:
# Architecture check: 13 -> 20 -> 1, 301 trainable parameters
# (matches the grid-search best hidden_layer_sizes of (20,)).
model.summary()
Model: "model_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_6 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_18 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_19 (Dense)             (None, 1)                 21        
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
_________________________________________________________________
In [172]:
model.set_weights(weights)  # restore the initial weights so this run starts fresh
# NOTE(review): `lr` is the legacy argument name; newer Keras versions use
# `learning_rate` (lr still works here but emits a deprecation warning).
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever validation accuracy fails to improve by
# at least 0.01 over 10 consecutive epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 168 samples, validate on 57 samples
Epoch 1/1000
168/168 [==============================] - 0s 767us/step - loss: 0.7409 - accuracy: 0.4524 - val_loss: 0.7005 - val_accuracy: 0.5263
Epoch 2/1000
168/168 [==============================] - 0s 77us/step - loss: 0.6410 - accuracy: 0.6429 - val_loss: 0.6336 - val_accuracy: 0.6316
Epoch 3/1000
168/168 [==============================] - 0s 83us/step - loss: 0.5912 - accuracy: 0.7024 - val_loss: 0.6007 - val_accuracy: 0.6491
Epoch 4/1000
168/168 [==============================] - 0s 89us/step - loss: 0.5613 - accuracy: 0.6905 - val_loss: 0.5951 - val_accuracy: 0.6491
Epoch 5/1000
168/168 [==============================] - 0s 83us/step - loss: 0.5452 - accuracy: 0.6964 - val_loss: 0.5939 - val_accuracy: 0.6667
Epoch 6/1000
168/168 [==============================] - 0s 59us/step - loss: 0.5329 - accuracy: 0.7202 - val_loss: 0.5949 - val_accuracy: 0.6491
Epoch 7/1000
168/168 [==============================] - 0s 65us/step - loss: 0.5235 - accuracy: 0.7202 - val_loss: 0.6057 - val_accuracy: 0.6667
Epoch 8/1000
168/168 [==============================] - 0s 59us/step - loss: 0.5187 - accuracy: 0.7440 - val_loss: 0.6144 - val_accuracy: 0.6316
Epoch 9/1000
168/168 [==============================] - 0s 65us/step - loss: 0.5105 - accuracy: 0.7381 - val_loss: 0.6141 - val_accuracy: 0.6491
Epoch 10/1000
168/168 [==============================] - 0s 65us/step - loss: 0.5042 - accuracy: 0.7560 - val_loss: 0.6174 - val_accuracy: 0.6491
Epoch 11/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4950 - accuracy: 0.7560 - val_loss: 0.6176 - val_accuracy: 0.6491
Epoch 12/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4889 - accuracy: 0.7500 - val_loss: 0.6170 - val_accuracy: 0.6491
Epoch 13/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4805 - accuracy: 0.7679 - val_loss: 0.6190 - val_accuracy: 0.6491
Epoch 14/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4721 - accuracy: 0.7738 - val_loss: 0.6194 - val_accuracy: 0.6491
Epoch 15/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4649 - accuracy: 0.7798 - val_loss: 0.6190 - val_accuracy: 0.6491

Epoch 00015: ReduceLROnPlateau reducing learning rate to 0.0044999998062849045.
Epoch 16/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4584 - accuracy: 0.7798 - val_loss: 0.6213 - val_accuracy: 0.6491
Epoch 17/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4553 - accuracy: 0.7798 - val_loss: 0.6213 - val_accuracy: 0.6491
Epoch 18/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4508 - accuracy: 0.7798 - val_loss: 0.6222 - val_accuracy: 0.6491
Epoch 19/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4467 - accuracy: 0.7738 - val_loss: 0.6233 - val_accuracy: 0.6491
Epoch 20/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4423 - accuracy: 0.7857 - val_loss: 0.6245 - val_accuracy: 0.6491
Epoch 21/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4381 - accuracy: 0.7917 - val_loss: 0.6232 - val_accuracy: 0.6491
Epoch 22/1000
168/168 [==============================] - 0s 101us/step - loss: 0.4335 - accuracy: 0.7917 - val_loss: 0.6160 - val_accuracy: 0.6491
Epoch 23/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4298 - accuracy: 0.7976 - val_loss: 0.6142 - val_accuracy: 0.6667
Epoch 24/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4257 - accuracy: 0.8095 - val_loss: 0.6135 - val_accuracy: 0.6667
Epoch 25/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4219 - accuracy: 0.8155 - val_loss: 0.6148 - val_accuracy: 0.6491

Epoch 00025: ReduceLROnPlateau reducing learning rate to 0.0022499999031424522.
Epoch 26/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4179 - accuracy: 0.8155 - val_loss: 0.6160 - val_accuracy: 0.6667
Epoch 27/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4160 - accuracy: 0.8214 - val_loss: 0.6163 - val_accuracy: 0.6667
Epoch 28/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4142 - accuracy: 0.8274 - val_loss: 0.6172 - val_accuracy: 0.6667
Epoch 29/1000
168/168 [==============================] - 0s 65us/step - loss: 0.4122 - accuracy: 0.8274 - val_loss: 0.6182 - val_accuracy: 0.6667
Epoch 30/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4098 - accuracy: 0.8274 - val_loss: 0.6202 - val_accuracy: 0.6667
Epoch 31/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4077 - accuracy: 0.8274 - val_loss: 0.6221 - val_accuracy: 0.6667
Epoch 32/1000
168/168 [==============================] - 0s 77us/step - loss: 0.4055 - accuracy: 0.8274 - val_loss: 0.6252 - val_accuracy: 0.6491
Epoch 33/1000
168/168 [==============================] - 0s 71us/step - loss: 0.4037 - accuracy: 0.8155 - val_loss: 0.6261 - val_accuracy: 0.6491
Epoch 34/1000
168/168 [==============================] - 0s 59us/step - loss: 0.4013 - accuracy: 0.8214 - val_loss: 0.6267 - val_accuracy: 0.6491
Epoch 35/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3991 - accuracy: 0.8214 - val_loss: 0.6266 - val_accuracy: 0.6491

Epoch 00035: ReduceLROnPlateau reducing learning rate to 0.0011249999515712261.
Epoch 36/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3970 - accuracy: 0.8214 - val_loss: 0.6259 - val_accuracy: 0.6491
Epoch 37/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3957 - accuracy: 0.8214 - val_loss: 0.6258 - val_accuracy: 0.6491
Epoch 38/1000
168/168 [==============================] - 0s 54us/step - loss: 0.3946 - accuracy: 0.8214 - val_loss: 0.6257 - val_accuracy: 0.6491
Epoch 39/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3940 - accuracy: 0.8214 - val_loss: 0.6264 - val_accuracy: 0.6491
Epoch 40/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3922 - accuracy: 0.8214 - val_loss: 0.6264 - val_accuracy: 0.6667
Epoch 41/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3911 - accuracy: 0.8214 - val_loss: 0.6260 - val_accuracy: 0.6667
Epoch 42/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3900 - accuracy: 0.8214 - val_loss: 0.6263 - val_accuracy: 0.6667
Epoch 43/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3891 - accuracy: 0.8214 - val_loss: 0.6270 - val_accuracy: 0.6667
Epoch 44/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3881 - accuracy: 0.8214 - val_loss: 0.6279 - val_accuracy: 0.6491
Epoch 45/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3866 - accuracy: 0.8214 - val_loss: 0.6281 - val_accuracy: 0.6491

Epoch 00045: ReduceLROnPlateau reducing learning rate to 0.0005624999757856131.
Epoch 46/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3857 - accuracy: 0.8274 - val_loss: 0.6285 - val_accuracy: 0.6667
Epoch 47/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3851 - accuracy: 0.8274 - val_loss: 0.6289 - val_accuracy: 0.6491
Epoch 48/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3843 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6491
Epoch 49/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3837 - accuracy: 0.8214 - val_loss: 0.6292 - val_accuracy: 0.6491
Epoch 50/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3833 - accuracy: 0.8214 - val_loss: 0.6292 - val_accuracy: 0.6491
Epoch 51/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3826 - accuracy: 0.8214 - val_loss: 0.6293 - val_accuracy: 0.6491
Epoch 52/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3821 - accuracy: 0.8214 - val_loss: 0.6292 - val_accuracy: 0.6491
Epoch 53/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3815 - accuracy: 0.8214 - val_loss: 0.6294 - val_accuracy: 0.6491
Epoch 54/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3808 - accuracy: 0.8214 - val_loss: 0.6294 - val_accuracy: 0.6667
Epoch 55/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3803 - accuracy: 0.8214 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00055: ReduceLROnPlateau reducing learning rate to 0.00028124998789280653.
Epoch 56/1000
168/168 [==============================] - 0s 71us/step - loss: 0.3797 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6491
Epoch 57/1000
168/168 [==============================] - 0s 54us/step - loss: 0.3794 - accuracy: 0.8274 - val_loss: 0.6293 - val_accuracy: 0.6491
Epoch 58/1000
168/168 [==============================] - 0s 71us/step - loss: 0.3792 - accuracy: 0.8274 - val_loss: 0.6294 - val_accuracy: 0.6491
Epoch 59/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3789 - accuracy: 0.8274 - val_loss: 0.6296 - val_accuracy: 0.6667
Epoch 60/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3786 - accuracy: 0.8274 - val_loss: 0.6296 - val_accuracy: 0.6667
Epoch 61/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3783 - accuracy: 0.8274 - val_loss: 0.6294 - val_accuracy: 0.6667
Epoch 62/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3780 - accuracy: 0.8274 - val_loss: 0.6293 - val_accuracy: 0.6667
Epoch 63/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3777 - accuracy: 0.8274 - val_loss: 0.6294 - val_accuracy: 0.6491
Epoch 64/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3774 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 65/1000
168/168 [==============================] - 0s 59us/step - loss: 0.3771 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491

Epoch 00065: ReduceLROnPlateau reducing learning rate to 0.00014062499394640326.
Epoch 66/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3769 - accuracy: 0.8274 - val_loss: 0.6296 - val_accuracy: 0.6491
Epoch 67/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3767 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 68/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3766 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 69/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3764 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 70/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3763 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 71/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3761 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 72/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3760 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 73/1000
168/168 [==============================] - 0s 71us/step - loss: 0.3759 - accuracy: 0.8274 - val_loss: 0.6295 - val_accuracy: 0.6491
Epoch 74/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3757 - accuracy: 0.8274 - val_loss: 0.6294 - val_accuracy: 0.6491
Epoch 75/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3756 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00075: ReduceLROnPlateau reducing learning rate to 7.031249697320163e-05.
Epoch 76/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3754 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 77/1000
168/168 [==============================] - 0s 65us/step - loss: 0.3754 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 78/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3753 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 79/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3752 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 80/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3751 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 81/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3751 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 82/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3750 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 83/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3749 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 84/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3748 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 85/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3748 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667

Epoch 00085: ReduceLROnPlateau reducing learning rate to 3.5156248486600816e-05.
Epoch 86/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3747 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 87/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3747 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 88/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3746 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 89/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3746 - accuracy: 0.8274 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 90/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3746 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 91/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3745 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 92/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3745 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 93/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3744 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 94/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3744 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 95/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3744 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667

Epoch 00095: ReduceLROnPlateau reducing learning rate to 1.7578124243300408e-05.
Epoch 96/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 97/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 98/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 99/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 100/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 101/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3743 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 102/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6291 - val_accuracy: 0.6667
Epoch 103/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 104/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 105/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00105: ReduceLROnPlateau reducing learning rate to 8.789062121650204e-06.
Epoch 106/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 107/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3742 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 108/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 109/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 110/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 111/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 112/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 113/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 114/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 115/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00115: ReduceLROnPlateau reducing learning rate to 4.394531060825102e-06.
Epoch 116/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 117/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 118/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 119/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 120/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3741 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 121/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 122/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 123/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 124/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 125/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00125: ReduceLROnPlateau reducing learning rate to 2.197265530412551e-06.
Epoch 126/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 127/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 128/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 129/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 130/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 131/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 132/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 133/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 134/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 135/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00135: ReduceLROnPlateau reducing learning rate to 1.0986327652062755e-06.
Epoch 136/1000
168/168 [==============================] - 0s 137us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 137/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 138/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 139/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 140/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 141/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 142/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 143/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 144/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 145/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00145: ReduceLROnPlateau reducing learning rate to 5.493163826031378e-07.
Epoch 146/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 147/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 148/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 149/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 150/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 151/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 152/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 153/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 154/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 155/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00155: ReduceLROnPlateau reducing learning rate to 2.746581913015689e-07.
Epoch 156/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 157/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 158/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 159/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 160/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 161/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 162/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 163/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 164/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 165/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00165: ReduceLROnPlateau reducing learning rate to 1.3732909565078444e-07.
Epoch 166/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 167/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 168/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 169/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 170/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 171/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 172/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 173/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 174/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 175/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00175: ReduceLROnPlateau reducing learning rate to 6.866454782539222e-08.
Epoch 176/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 177/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 178/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 179/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 180/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 181/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 182/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 183/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 184/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 185/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00185: ReduceLROnPlateau reducing learning rate to 3.433227391269611e-08.
Epoch 186/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 187/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 188/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 189/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 190/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 191/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 192/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 193/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 194/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 195/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00195: ReduceLROnPlateau reducing learning rate to 1.7166136956348055e-08.
Epoch 196/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 197/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 198/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 199/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 200/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 201/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 202/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 203/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 204/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 205/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00205: ReduceLROnPlateau reducing learning rate to 8.583068478174027e-09.
Epoch 206/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 207/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 208/1000
168/168 [==============================] - ETA: 0s - loss: 0.4425 - accuracy: 0.71 - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 209/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 210/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 211/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 212/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 213/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 214/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 215/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00215: ReduceLROnPlateau reducing learning rate to 4.291534239087014e-09.
Epoch 216/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 217/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 218/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 219/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 220/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 221/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 222/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 223/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 224/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 225/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00225: ReduceLROnPlateau reducing learning rate to 2.145767119543507e-09.
Epoch 226/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 227/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 228/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 229/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 230/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 231/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 232/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 233/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 234/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 235/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00235: ReduceLROnPlateau reducing learning rate to 1.0728835597717534e-09.
Epoch 236/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 237/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 238/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 239/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 240/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 241/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 242/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 243/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 244/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 245/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00245: ReduceLROnPlateau reducing learning rate to 5.364417798858767e-10.
Epoch 246/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 247/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 248/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 249/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 250/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 251/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 252/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 253/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 254/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 255/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00255: ReduceLROnPlateau reducing learning rate to 2.6822088994293836e-10.
Epoch 256/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 257/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 258/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 259/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 260/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 261/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 262/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 263/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 264/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 265/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00265: ReduceLROnPlateau reducing learning rate to 1.3411044497146918e-10.
Epoch 266/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 267/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 268/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 269/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 270/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 271/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 272/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 273/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 274/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 275/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00275: ReduceLROnPlateau reducing learning rate to 6.705522248573459e-11.
Epoch 276/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 277/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 278/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 279/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 280/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 281/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 282/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 283/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 284/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 285/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00285: ReduceLROnPlateau reducing learning rate to 3.3527611242867295e-11.
Epoch 286/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 287/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 288/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 289/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 290/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 291/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 292/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 293/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 294/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 295/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00295: ReduceLROnPlateau reducing learning rate to 1.6763805621433647e-11.
Epoch 296/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 297/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 298/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 299/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 300/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 301/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 302/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 303/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 304/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 305/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00305: ReduceLROnPlateau reducing learning rate to 8.381902810716824e-12.
Epoch 306/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 307/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 308/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 309/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 310/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 311/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 312/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 313/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 314/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 315/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00315: ReduceLROnPlateau reducing learning rate to 4.190951405358412e-12.
Epoch 316/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 317/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 318/1000
168/168 [==============================] - 0s 149us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 319/1000
168/168 [==============================] - ETA: 0s - loss: 0.3350 - accuracy: 0.87 - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 320/1000
168/168 [==============================] - 0s 143us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 321/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 322/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 323/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 324/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 325/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00325: ReduceLROnPlateau reducing learning rate to 2.095475702679206e-12.
Epoch 326/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 327/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 328/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 329/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 330/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 331/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 332/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 333/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 334/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 335/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00335: ReduceLROnPlateau reducing learning rate to 1.047737851339603e-12.
Epoch 336/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 337/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 338/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 339/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 340/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 341/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 342/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 343/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 344/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 345/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00345: ReduceLROnPlateau reducing learning rate to 5.238689256698015e-13.
Epoch 346/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 347/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 348/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 349/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 350/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 351/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 352/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 353/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 354/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 355/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00355: ReduceLROnPlateau reducing learning rate to 2.6193446283490074e-13.
Epoch 356/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 357/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 358/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 359/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 360/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 361/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 362/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 363/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 364/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 365/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00365: ReduceLROnPlateau reducing learning rate to 1.3096723141745037e-13.
Epoch 366/1000
168/168 [==============================] - 0s 196us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 367/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 368/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 369/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 370/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 371/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 372/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 373/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 374/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 375/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00375: ReduceLROnPlateau reducing learning rate to 6.548361570872518e-14.
Epoch 376/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 377/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 378/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 379/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 380/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 381/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 382/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 383/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 384/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 385/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00385: ReduceLROnPlateau reducing learning rate to 3.274180785436259e-14.
Epoch 386/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 387/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 388/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 389/1000
168/168 [==============================] - 0s 137us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 390/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 391/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 392/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 393/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 394/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 395/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00395: ReduceLROnPlateau reducing learning rate to 1.6370903927181296e-14.
Epoch 396/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 397/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 398/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 399/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 400/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 401/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 402/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 403/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 404/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 405/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00405: ReduceLROnPlateau reducing learning rate to 8.185451963590648e-15.
Epoch 406/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 407/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 408/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 409/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 410/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 411/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 412/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 413/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 414/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 415/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00415: ReduceLROnPlateau reducing learning rate to 4.092725981795324e-15.
Epoch 416/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 417/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 418/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 419/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 420/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 421/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 422/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 423/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 424/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 425/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00425: ReduceLROnPlateau reducing learning rate to 2.046362990897662e-15.
Epoch 426/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 427/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 428/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 429/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 430/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 431/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 432/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 433/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 434/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 435/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00435: ReduceLROnPlateau reducing learning rate to 1.023181495448831e-15.
Epoch 436/1000
168/168 [==============================] - 0s 143us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 437/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 438/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 439/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 440/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 441/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 442/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 443/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 444/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 445/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00445: ReduceLROnPlateau reducing learning rate to 5.115907477244155e-16.
Epoch 446/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 447/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 448/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 449/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 450/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 451/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 452/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 453/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 454/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 455/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00455: ReduceLROnPlateau reducing learning rate to 2.5579537386220775e-16.
Epoch 456/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 457/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 458/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 459/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 460/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 461/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 462/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 463/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 464/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 465/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00465: ReduceLROnPlateau reducing learning rate to 1.2789768693110388e-16.
Epoch 466/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 467/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 468/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 469/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 470/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 471/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 472/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 473/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 474/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 475/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00475: ReduceLROnPlateau reducing learning rate to 6.394884346555194e-17.
Epoch 476/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 477/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 478/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 479/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 480/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 481/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 482/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 483/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 484/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 485/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00485: ReduceLROnPlateau reducing learning rate to 3.197442173277597e-17.
Epoch 486/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 487/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 488/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 489/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 490/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 491/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 492/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 493/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 494/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 495/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00495: ReduceLROnPlateau reducing learning rate to 1.5987210866387985e-17.
Epoch 496/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 497/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 498/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 499/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 500/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 501/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 502/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 503/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 504/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 505/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00505: ReduceLROnPlateau reducing learning rate to 7.993605433193992e-18.
Epoch 506/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 507/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 508/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 509/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 510/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 511/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 512/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 513/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 514/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 515/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00515: ReduceLROnPlateau reducing learning rate to 3.996802716596996e-18.
Epoch 516/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 517/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 518/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 519/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 520/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 521/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 522/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 523/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 524/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 525/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00525: ReduceLROnPlateau reducing learning rate to 1.998401358298498e-18.
Epoch 526/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 527/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 528/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 529/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 530/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 531/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 532/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 533/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 534/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 535/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00535: ReduceLROnPlateau reducing learning rate to 9.99200679149249e-19.
Epoch 536/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 537/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 538/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 539/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 540/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 541/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 542/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 543/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 544/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 545/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00545: ReduceLROnPlateau reducing learning rate to 4.996003395746245e-19.
Epoch 546/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 547/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 548/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 549/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 550/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 551/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 552/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 553/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 554/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 555/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00555: ReduceLROnPlateau reducing learning rate to 2.4980016978731226e-19.
Epoch 556/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 557/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 558/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 559/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 560/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 561/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 562/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 563/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 564/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 565/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00565: ReduceLROnPlateau reducing learning rate to 1.2490008489365613e-19.
Epoch 566/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 567/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 568/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 569/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 570/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 571/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 572/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 573/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 574/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 575/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00575: ReduceLROnPlateau reducing learning rate to 6.245004244682806e-20.
Epoch 576/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 577/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 578/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 579/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 580/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 581/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 582/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 583/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 584/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 585/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00585: ReduceLROnPlateau reducing learning rate to 3.122502122341403e-20.
Epoch 586/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 587/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 588/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 589/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 590/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 591/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 592/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 593/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 594/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 595/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00595: ReduceLROnPlateau reducing learning rate to 1.5612510611707016e-20.
Epoch 596/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 597/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 598/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 599/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 600/1000
168/168 [==============================] - 0s 131us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 601/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 602/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 603/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 604/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 605/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00605: ReduceLROnPlateau reducing learning rate to 7.806255305853508e-21.
Epoch 606/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 607/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 608/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 609/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 610/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 611/1000
168/168 [==============================] - 0s 155us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 612/1000
168/168 [==============================] - 0s 149us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 613/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 614/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 615/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00615: ReduceLROnPlateau reducing learning rate to 3.903127652926754e-21.
Epoch 616/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 617/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 618/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 619/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 620/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 621/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 622/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 623/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 624/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 625/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00625: ReduceLROnPlateau reducing learning rate to 1.951563826463377e-21.
Epoch 626/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 627/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 628/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 629/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 630/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 631/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 632/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 633/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 634/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 635/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00635: ReduceLROnPlateau reducing learning rate to 9.757819132316885e-22.
Epoch 636/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 637/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 638/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 639/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 640/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 641/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 642/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 643/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 644/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 645/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00645: ReduceLROnPlateau reducing learning rate to 4.878909566158443e-22.
Epoch 646/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 647/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 648/1000
168/168 [==============================] - ETA: 0s - loss: 0.4081 - accuracy: 0.78 - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 649/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 650/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 651/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 652/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 653/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 654/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 655/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00655: ReduceLROnPlateau reducing learning rate to 2.4394547830792213e-22.
Epoch 656/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 657/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 658/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 659/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 660/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 661/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 662/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 663/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 664/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 665/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00665: ReduceLROnPlateau reducing learning rate to 1.2197273915396106e-22.
Epoch 666/1000
168/168 [==============================] - 0s 143us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 667/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 668/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 669/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 670/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 671/1000
168/168 [==============================] - 0s 184us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 672/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 673/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 674/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 675/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00675: ReduceLROnPlateau reducing learning rate to 6.098636957698053e-23.
Epoch 676/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 677/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 678/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 679/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 680/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 681/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 682/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 683/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 684/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 685/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00685: ReduceLROnPlateau reducing learning rate to 3.0493184788490266e-23.
Epoch 686/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 687/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 688/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 689/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 690/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 691/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 692/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 693/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 694/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 695/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00695: ReduceLROnPlateau reducing learning rate to 1.5246592394245133e-23.
Epoch 696/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 697/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 698/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 699/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 700/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 701/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 702/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 703/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 704/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 705/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00705: ReduceLROnPlateau reducing learning rate to 7.623296197122566e-24.
Epoch 706/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 707/1000
168/168 [==============================] - ETA: 0s - loss: 0.3427 - accuracy: 0.90 - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 708/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 709/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 710/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 711/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 712/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 713/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 714/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 715/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00715: ReduceLROnPlateau reducing learning rate to 3.811648098561283e-24.
Epoch 716/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 717/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 718/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 719/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 720/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 721/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 722/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 723/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 724/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 725/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00725: ReduceLROnPlateau reducing learning rate to 1.9058240492806416e-24.
Epoch 726/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 727/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 728/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 729/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 730/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 731/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 732/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 733/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 734/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 735/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00735: ReduceLROnPlateau reducing learning rate to 9.529120246403208e-25.
Epoch 736/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 737/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 738/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 739/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 740/1000
168/168 [==============================] - 0s 125us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 741/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 742/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 743/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 744/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 745/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00745: ReduceLROnPlateau reducing learning rate to 4.764560123201604e-25.
Epoch 746/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 747/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 748/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 749/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 750/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 751/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 752/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 753/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 754/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 755/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00755: ReduceLROnPlateau reducing learning rate to 2.382280061600802e-25.
Epoch 756/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 757/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 758/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 759/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 760/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 761/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 762/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 763/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 764/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 765/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00765: ReduceLROnPlateau reducing learning rate to 1.191140030800401e-25.
Epoch 766/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 767/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 768/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 769/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 770/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 771/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 772/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 773/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 774/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 775/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00775: ReduceLROnPlateau reducing learning rate to 5.955700154002005e-26.
Epoch 776/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 777/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 778/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 779/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 780/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 781/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 782/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 783/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 784/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 785/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00785: ReduceLROnPlateau reducing learning rate to 2.9778500770010025e-26.
Epoch 786/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 787/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 788/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 789/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 790/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 791/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 792/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 793/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 794/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 795/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00795: ReduceLROnPlateau reducing learning rate to 1.4889250385005013e-26.
Epoch 796/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 797/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 798/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 799/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 800/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 801/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 802/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 803/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 804/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 805/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00805: ReduceLROnPlateau reducing learning rate to 7.444625192502506e-27.
Epoch 806/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 807/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 808/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 809/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 810/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 811/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 812/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 813/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 814/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 815/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00815: ReduceLROnPlateau reducing learning rate to 3.722312596251253e-27.
Epoch 816/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 817/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 818/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 819/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 820/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 821/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 822/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 823/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 824/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 825/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00825: ReduceLROnPlateau reducing learning rate to 1.8611562981256266e-27.
Epoch 826/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 827/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 828/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 829/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 830/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 831/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 832/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 833/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 834/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 835/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00835: ReduceLROnPlateau reducing learning rate to 9.305781490628133e-28.
Epoch 836/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 837/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 838/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 839/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 840/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 841/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 842/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 843/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 844/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 845/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00845: ReduceLROnPlateau reducing learning rate to 4.6528907453140665e-28.
Epoch 846/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 847/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 848/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 849/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 850/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 851/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 852/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 853/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 854/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 855/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00855: ReduceLROnPlateau reducing learning rate to 2.3264453726570332e-28.
Epoch 856/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 857/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 858/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 859/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 860/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 861/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 862/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 863/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 864/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 865/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00865: ReduceLROnPlateau reducing learning rate to 1.1632226863285166e-28.
Epoch 866/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 867/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 868/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 869/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 870/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 871/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 872/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 873/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 874/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 875/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00875: ReduceLROnPlateau reducing learning rate to 5.816113431642583e-29.
Epoch 876/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 877/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 878/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 879/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 880/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 881/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 882/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 883/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 884/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 885/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00885: ReduceLROnPlateau reducing learning rate to 2.9080567158212915e-29.
Epoch 886/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 887/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 888/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 889/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 890/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 891/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 892/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 893/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 894/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 895/1000
168/168 [==============================] - 0s 119us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00895: ReduceLROnPlateau reducing learning rate to 1.4540283579106458e-29.
Epoch 896/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 897/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 898/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 899/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 900/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 901/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 902/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 903/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 904/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 905/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00905: ReduceLROnPlateau reducing learning rate to 7.270141789553229e-30.
Epoch 906/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 907/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 908/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 909/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 910/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 911/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 912/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 913/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 914/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 915/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00915: ReduceLROnPlateau reducing learning rate to 3.6350708947766144e-30.
Epoch 916/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 917/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 918/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 919/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 920/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 921/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 922/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 923/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 924/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 925/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00925: ReduceLROnPlateau reducing learning rate to 1.8175354473883072e-30.
Epoch 926/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 927/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 928/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 929/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 930/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 931/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 932/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 933/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 934/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 935/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00935: ReduceLROnPlateau reducing learning rate to 9.087677236941536e-31.
Epoch 936/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 937/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 938/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 939/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 940/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 941/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 942/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 943/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 944/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 945/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00945: ReduceLROnPlateau reducing learning rate to 4.543838618470768e-31.
Epoch 946/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 947/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 948/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 949/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 950/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 951/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 952/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 953/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 954/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 955/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00955: ReduceLROnPlateau reducing learning rate to 2.271919309235384e-31.
Epoch 956/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 957/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 958/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 959/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 960/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 961/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 962/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 963/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 964/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 965/1000
168/168 [==============================] - 0s 101us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00965: ReduceLROnPlateau reducing learning rate to 1.135959654617692e-31.
Epoch 966/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 967/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 968/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 969/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 970/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 971/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 972/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 973/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 974/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 975/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00975: ReduceLROnPlateau reducing learning rate to 5.67979827308846e-32.
Epoch 976/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 977/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 978/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 979/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 980/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 981/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 982/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 983/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 984/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 985/1000
168/168 [==============================] - 0s 95us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00985: ReduceLROnPlateau reducing learning rate to 2.83989913654423e-32.
Epoch 986/1000
168/168 [==============================] - 0s 77us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 987/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 988/1000
168/168 [==============================] - 0s 113us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 989/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 990/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 991/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 992/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 993/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 994/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 995/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667

Epoch 00995: ReduceLROnPlateau reducing learning rate to 1.419949568272115e-32.
Epoch 996/1000
168/168 [==============================] - 0s 83us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 997/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 998/1000
168/168 [==============================] - 0s 89us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 999/1000
168/168 [==============================] - 0s 107us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
Epoch 1000/1000
168/168 [==============================] - 0s 232us/step - loss: 0.3740 - accuracy: 0.8274 - val_loss: 0.6292 - val_accuracy: 0.6667
In [173]:
# Plot the training history: accuracy and loss curves, train vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per recorded epoch.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 1000)
In [174]:
# Evaluate the trained network on the held-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
57/57 [==============================] - 0s 70us/step
test loss: 0.6292092747855604, test accuracy: 0.6666666865348816
In [175]:
# Score the raw model outputs against the test labels with ROC AUC.
y_pred = model.predict(X_test)
auc_score = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc_score)
AUC ROC:  0.6737891737891738
In [176]:
# Binarise the predicted scores at the 0.5 threshold, then compute
# Cohen's kappa (chance-corrected agreement) against the true labels.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.13842482100238673

KMeans

In [177]:
# Inspect the MFCC feature matrix before clustering (225 rows x 13 cols).
X
Out[177]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -1.430409 -0.286045 0.490919 0.872836 -0.908379 -0.032724 -0.630149 -0.077256 -0.666116 -0.409507 -0.808189 -0.316827 1.001635
1 -0.282157 -1.478798 -1.125065 1.134727 0.294401 -1.552108 -2.048764 -0.332843 -0.298952 1.114161 0.274322 1.796602 2.309045
2 -0.419749 0.440468 0.853257 0.239250 -0.398831 -0.780274 -0.360447 -0.316940 -0.976474 0.879549 -0.457280 0.640345 0.645756
3 -0.431942 -1.517593 0.673149 0.786628 -1.306695 1.366669 1.142564 -2.034919 -0.374780 0.475572 -0.952521 0.198178 0.640593
4 0.011173 0.536562 -0.966199 -0.482951 0.331291 0.606478 0.539582 -0.270964 0.042364 -0.165555 0.123590 0.101357 -0.658096
5 0.126251 -0.590904 -1.517639 -1.072279 0.624234 1.023491 0.771080 0.598433 -0.513511 -0.860661 0.652573 1.694095 0.860230
6 0.830490 -0.982750 2.985373 1.132038 0.506664 3.816969 -0.181042 -0.077794 -0.404476 -0.632689 -0.730525 -0.299915 -0.619079
7 -0.461917 0.208363 0.278132 2.301636 1.667266 0.690336 1.117026 1.428294 1.041434 0.404890 -1.225161 -1.083934 0.065409
8 0.249199 -0.247143 -0.916299 0.679460 -0.342859 0.174455 0.125219 0.900110 0.761341 -0.398986 -1.063148 0.855251 2.099798
9 0.311144 1.364526 -0.004701 0.405862 -0.939024 0.796235 -0.589807 -0.084900 0.092344 -0.628186 -0.601718 -0.966679 0.086483
10 1.334587 0.242352 0.597807 1.994512 -0.050194 0.166573 -0.234755 0.602868 -1.535513 -3.732625 -0.901353 0.263896 -1.060994
11 1.318614 -0.797973 0.011175 -0.735851 -0.310309 -1.472949 -0.886459 0.062987 -0.118121 0.096635 0.405075 -1.082018 0.068160
12 0.824621 1.143049 0.195198 -0.036161 0.107483 -0.112574 -0.229915 -0.249936 0.727625 -0.048902 0.611366 0.128240 0.758492
13 -0.568472 -0.848858 0.090767 1.154534 -0.215999 -2.659688 -1.881352 0.412350 -0.790718 0.827656 0.539638 1.593572 2.002635
14 0.406968 1.201176 1.075155 -0.124700 0.135767 -0.178145 -0.204776 -1.618374 0.193121 0.693318 1.055785 -0.160648 0.537962
15 -0.287723 1.515404 0.539186 0.427811 -0.006824 0.845973 0.412855 -0.313048 0.703395 0.284501 -0.348641 -0.519018 -0.664237
16 0.522131 0.617054 0.277861 -0.836147 -1.629099 -0.005864 0.191114 0.043306 0.248219 0.814197 -0.101490 -0.978501 -0.398761
17 -0.896889 -1.308091 -0.693222 0.501341 -0.162478 -0.037095 -0.270506 0.346239 0.691093 -0.015841 -0.871109 -1.169731 0.270587
18 0.875184 0.085665 0.080385 0.242890 -0.938716 -1.140718 -0.097002 -0.428159 -0.490553 -0.095430 -0.233748 -0.812311 0.895950
19 -0.659140 -0.549349 -0.129098 1.107800 -3.028026 -0.963090 0.170241 -2.196582 -0.518279 1.312693 -1.543068 -0.398880 0.589793
20 -0.563764 -2.009854 0.074903 -0.464555 -1.109839 0.088831 -0.566914 0.384785 -0.625854 -0.723645 -1.000855 0.809858 0.131808
21 1.250269 0.010904 0.201698 -0.957619 -0.013951 1.101073 -0.075927 -0.502371 -1.866004 0.114645 0.156305 0.629372 1.366339
22 -1.190023 -0.268956 -0.555588 -0.621566 -0.230031 0.264870 -0.241968 0.258810 0.329697 0.359684 0.486221 0.532413 0.397267
23 -0.802365 0.152404 -0.567015 0.381822 0.592009 -0.562610 -1.563042 0.721323 2.941459 2.101624 0.519023 -0.366100 -0.253206
24 -0.893865 0.341962 -0.830315 0.329795 0.563212 -0.327526 -1.488529 0.513889 2.829533 2.088829 0.636581 -0.214855 -0.258980
25 -0.121562 1.347439 0.744281 0.698770 -0.746534 -0.657656 -0.027263 -0.447918 0.338675 1.028922 0.807479 0.808201 -0.400533
26 -1.615036 1.323285 0.625582 0.721012 -0.691466 -0.853918 -0.638037 -0.521595 0.602985 0.889454 -0.304429 -1.597203 -0.132420
27 -0.522333 1.534988 -0.017521 -0.056191 0.437400 1.304359 0.421225 -0.229724 0.856944 0.363658 -0.300702 0.199378 -0.091915
28 0.020009 -0.529617 -0.687843 -1.068170 -0.317981 -0.873679 2.545261 0.319549 0.389927 -1.527578 -0.515574 -1.761353 0.997758
29 0.294921 -0.175540 -2.170589 0.206338 0.608432 -2.666144 -0.240215 1.450295 1.100392 -1.316262 0.000504 -3.910703 -0.679615
... ... ... ... ... ... ... ... ... ... ... ... ... ...
195 -0.713121 -0.124100 0.184524 -0.289193 1.143155 -0.040337 -0.456140 0.386906 -0.409479 -0.013040 -0.448614 0.678472 0.856538
196 0.088904 -0.480222 0.314568 0.011773 -0.871197 -0.254444 -1.214378 0.074859 0.805154 0.107568 -0.101697 0.725634 -0.674412
197 -0.819978 -0.214030 0.603520 -0.224844 0.008606 0.221715 0.248298 0.580332 0.548519 1.036227 0.084133 -0.973833 -1.118049
198 0.089868 -0.642488 -0.163873 0.162352 -2.220413 -1.138568 -0.479671 2.364939 0.560742 -0.314032 1.123286 0.586946 1.028865
199 0.490718 -0.570100 -0.601226 -1.373455 -2.219961 -1.917807 -1.874507 2.571947 0.227763 -1.470442 0.797456 0.390489 0.443397
200 -0.725311 0.995058 1.698440 -0.409174 1.345314 -0.371509 0.853418 0.950027 0.524004 -0.776995 0.384800 0.564620 0.242242
201 -0.088956 0.381477 0.665527 0.871194 0.436489 0.159443 0.297415 0.889031 -0.725496 -0.079337 0.812380 0.888138 0.315317
202 -0.627833 0.625737 0.574533 0.351258 0.679287 0.045521 -0.005976 -0.356564 -0.077959 -0.043215 1.410034 0.680198 0.118446
203 -0.591412 -1.691897 -1.280443 0.004164 0.029231 1.050215 1.339125 0.772476 -0.548882 -0.875977 -0.739724 -0.002660 0.550573
204 -0.213359 -0.655305 -0.545729 -0.525488 -0.788705 -0.715509 0.006099 0.426857 0.380788 0.708127 1.161262 0.582157 0.085247
205 0.561065 -0.270819 -0.376934 1.362033 -0.711445 1.354311 -0.206743 -0.142823 1.553667 -0.536125 -1.684840 0.688414 -1.009051
206 0.905511 0.711971 1.427482 0.086910 -0.261378 0.561744 0.222402 -1.622424 0.024138 0.934010 -0.468465 0.170884 0.221140
207 0.067531 0.386370 -0.307384 -0.040460 -0.049760 -0.032394 0.431908 1.042755 -1.011654 -0.412244 -0.108096 0.184730 -0.138904
208 -0.059758 0.045608 -0.194712 -0.038341 -0.248229 0.218675 -0.402325 0.773505 0.041528 0.766170 -0.042900 0.536066 0.856860
209 0.486749 -0.756717 -0.874808 -0.729025 -1.528664 0.066851 -0.217409 2.535472 0.810467 2.007519 -1.633543 1.078687 -1.214695
210 -0.009918 0.023157 -0.108599 -0.353482 -0.525150 0.026241 -0.209046 0.952549 0.118271 0.761473 -0.021901 0.723007 1.176221
211 -0.852239 -0.126721 1.768756 -0.139569 1.457419 -1.896514 0.722738 -1.858343 1.023542 0.337423 -1.696471 0.797698 -1.801833
212 -1.605282 0.546705 -0.027523 -0.007901 0.390982 0.752113 0.108134 -0.532402 -0.658558 -0.655673 -0.110552 -0.038507 0.564082
213 -1.537486 0.438542 -0.054954 -0.009054 0.565426 0.944990 0.000999 -0.699569 -0.616522 -0.546167 0.075944 0.000029 0.772172
214 0.478176 -0.623588 -1.163628 -0.024044 -0.377051 0.114672 -1.189664 -0.599743 0.064422 -0.284247 0.793914 0.752339 -0.558744
215 -0.653553 -0.272142 0.596156 0.881373 -2.295187 0.283720 -0.193981 -0.067370 0.777762 -0.959991 -0.275185 0.990175 -2.038870
216 -0.746791 -0.229040 0.929885 0.869993 -2.913181 0.212781 -0.305148 -0.195613 1.117807 -0.935236 -0.012361 1.696083 -2.096406
217 -0.402132 0.567649 0.658617 1.252447 0.282722 -0.867962 -0.658417 -0.711252 0.412558 0.018922 -0.656841 -0.668848 -0.606249
218 0.253416 2.028743 -0.249389 -0.135717 -0.432099 -1.127803 0.330577 -0.150249 1.400036 -0.237307 -1.036935 0.796314 0.315125
219 -0.268737 0.304053 0.442309 1.466913 -0.190859 -1.339833 -0.412332 -0.934782 -0.340795 -0.803146 -0.913412 -1.601519 -0.895268
220 0.164485 1.445490 2.674724 -0.663649 -0.232015 1.428702 0.961717 -2.600552 -0.356496 0.842619 2.796380 0.923058 1.962832
221 0.702551 0.697481 0.141117 -0.647568 0.265119 0.543185 0.967290 -0.204736 -0.388782 -1.297479 1.446403 1.404421 0.191658
222 0.727431 0.078441 -0.027658 -0.293281 -0.116893 0.762821 -0.474665 -0.142512 -0.107789 0.767739 -0.106817 -0.016677 0.681705
223 -1.601068 0.456897 -0.776221 -0.202831 0.972321 1.554034 1.293388 0.533103 -0.658778 -0.011963 -0.965611 1.297730 2.334936
224 -0.775289 -1.780714 -0.773207 -0.130797 -0.258296 0.465109 0.964189 -0.054493 -0.318554 -0.116591 0.490944 0.263715 0.162778

225 rows × 13 columns

In [178]:
# Elbow method: within-cluster sum of squares (KMeans inertia)
# for every candidate cluster count k = 1..14.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[178]:
[2925.0,
 2654.4694551642833,
 2487.537140339455,
 2347.3385606542174,
 2241.219607532395,
 2187.984558812649,
 2078.2651666536785,
 1992.3748205985125,
 1984.6213758642439,
 1904.4598519281963,
 1850.0132981301954,
 1816.2106987200941,
 1761.4652710138919,
 1722.4326028221699]
In [179]:
# Elbow plot: look for the bend that marks a good cluster count.
cluster_counts = range(1, 15)
plt.figure(figsize=(12, 12))
plt.plot(cluster_counts, WSSs)
Out[179]:
[<matplotlib.lines.Line2D at 0x1e82f5d1048>]

K = 2 (chosen from the elbow plot above)

In [180]:
# Final clustering: k = 2 (chosen from the elbow curve), with a fixed
# seed and 10 restarts so the result is reproducible.
kmeans_mfcc = KMeans(n_clusters=2, n_init=10, random_state=0)
kmeans_mfcc.fit(X)
Out[180]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [181]:
# Cluster assignment (0/1) produced by fit() for each row of X.
kmeans_mfcc.labels_
Out[181]:
array([1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
       0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0,
       0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0,
       1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1,
       0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,
       0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1,
       1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0,
       0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 0])
In [182]:
# Predict cluster membership for X; since X is the same data the model
# was fitted on, this matches kmeans_mfcc.labels_ above.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[182]:
array([1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
       0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0,
       0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1,
       0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0,
       1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1,
       0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,
       0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1,
       1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0,
       0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 0])
In [183]:
# Annotate the feature matrix with the K-means cluster id and the target
# label (mutates X in place, adding two new columns, as the original did).
X['Cluster'] = clusters_mfcc
X['chosen'] = list(y)
In [184]:
# Display the annotated feature matrix (13 MFCC columns + Cluster + chosen)
X
Out[184]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -1.430409 -0.286045 0.490919 0.872836 -0.908379 -0.032724 -0.630149 -0.077256 -0.666116 -0.409507 -0.808189 -0.316827 1.001635 1 0
1 -0.282157 -1.478798 -1.125065 1.134727 0.294401 -1.552108 -2.048764 -0.332843 -0.298952 1.114161 0.274322 1.796602 2.309045 1 0
2 -0.419749 0.440468 0.853257 0.239250 -0.398831 -0.780274 -0.360447 -0.316940 -0.976474 0.879549 -0.457280 0.640345 0.645756 1 0
3 -0.431942 -1.517593 0.673149 0.786628 -1.306695 1.366669 1.142564 -2.034919 -0.374780 0.475572 -0.952521 0.198178 0.640593 1 0
4 0.011173 0.536562 -0.966199 -0.482951 0.331291 0.606478 0.539582 -0.270964 0.042364 -0.165555 0.123590 0.101357 -0.658096 0 0
5 0.126251 -0.590904 -1.517639 -1.072279 0.624234 1.023491 0.771080 0.598433 -0.513511 -0.860661 0.652573 1.694095 0.860230 0 0
6 0.830490 -0.982750 2.985373 1.132038 0.506664 3.816969 -0.181042 -0.077794 -0.404476 -0.632689 -0.730525 -0.299915 -0.619079 1 0
7 -0.461917 0.208363 0.278132 2.301636 1.667266 0.690336 1.117026 1.428294 1.041434 0.404890 -1.225161 -1.083934 0.065409 1 0
8 0.249199 -0.247143 -0.916299 0.679460 -0.342859 0.174455 0.125219 0.900110 0.761341 -0.398986 -1.063148 0.855251 2.099798 1 0
9 0.311144 1.364526 -0.004701 0.405862 -0.939024 0.796235 -0.589807 -0.084900 0.092344 -0.628186 -0.601718 -0.966679 0.086483 1 0
10 1.334587 0.242352 0.597807 1.994512 -0.050194 0.166573 -0.234755 0.602868 -1.535513 -3.732625 -0.901353 0.263896 -1.060994 1 0
11 1.318614 -0.797973 0.011175 -0.735851 -0.310309 -1.472949 -0.886459 0.062987 -0.118121 0.096635 0.405075 -1.082018 0.068160 0 0
12 0.824621 1.143049 0.195198 -0.036161 0.107483 -0.112574 -0.229915 -0.249936 0.727625 -0.048902 0.611366 0.128240 0.758492 1 0
13 -0.568472 -0.848858 0.090767 1.154534 -0.215999 -2.659688 -1.881352 0.412350 -0.790718 0.827656 0.539638 1.593572 2.002635 1 0
14 0.406968 1.201176 1.075155 -0.124700 0.135767 -0.178145 -0.204776 -1.618374 0.193121 0.693318 1.055785 -0.160648 0.537962 1 0
15 -0.287723 1.515404 0.539186 0.427811 -0.006824 0.845973 0.412855 -0.313048 0.703395 0.284501 -0.348641 -0.519018 -0.664237 1 0
16 0.522131 0.617054 0.277861 -0.836147 -1.629099 -0.005864 0.191114 0.043306 0.248219 0.814197 -0.101490 -0.978501 -0.398761 0 0
17 -0.896889 -1.308091 -0.693222 0.501341 -0.162478 -0.037095 -0.270506 0.346239 0.691093 -0.015841 -0.871109 -1.169731 0.270587 0 0
18 0.875184 0.085665 0.080385 0.242890 -0.938716 -1.140718 -0.097002 -0.428159 -0.490553 -0.095430 -0.233748 -0.812311 0.895950 1 0
19 -0.659140 -0.549349 -0.129098 1.107800 -3.028026 -0.963090 0.170241 -2.196582 -0.518279 1.312693 -1.543068 -0.398880 0.589793 1 0
20 -0.563764 -2.009854 0.074903 -0.464555 -1.109839 0.088831 -0.566914 0.384785 -0.625854 -0.723645 -1.000855 0.809858 0.131808 0 0
21 1.250269 0.010904 0.201698 -0.957619 -0.013951 1.101073 -0.075927 -0.502371 -1.866004 0.114645 0.156305 0.629372 1.366339 1 0
22 -1.190023 -0.268956 -0.555588 -0.621566 -0.230031 0.264870 -0.241968 0.258810 0.329697 0.359684 0.486221 0.532413 0.397267 0 0
23 -0.802365 0.152404 -0.567015 0.381822 0.592009 -0.562610 -1.563042 0.721323 2.941459 2.101624 0.519023 -0.366100 -0.253206 0 0
24 -0.893865 0.341962 -0.830315 0.329795 0.563212 -0.327526 -1.488529 0.513889 2.829533 2.088829 0.636581 -0.214855 -0.258980 0 0
25 -0.121562 1.347439 0.744281 0.698770 -0.746534 -0.657656 -0.027263 -0.447918 0.338675 1.028922 0.807479 0.808201 -0.400533 1 0
26 -1.615036 1.323285 0.625582 0.721012 -0.691466 -0.853918 -0.638037 -0.521595 0.602985 0.889454 -0.304429 -1.597203 -0.132420 1 0
27 -0.522333 1.534988 -0.017521 -0.056191 0.437400 1.304359 0.421225 -0.229724 0.856944 0.363658 -0.300702 0.199378 -0.091915 1 0
28 0.020009 -0.529617 -0.687843 -1.068170 -0.317981 -0.873679 2.545261 0.319549 0.389927 -1.527578 -0.515574 -1.761353 0.997758 0 0
29 0.294921 -0.175540 -2.170589 0.206338 0.608432 -2.666144 -0.240215 1.450295 1.100392 -1.316262 0.000504 -3.910703 -0.679615 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
195 -0.713121 -0.124100 0.184524 -0.289193 1.143155 -0.040337 -0.456140 0.386906 -0.409479 -0.013040 -0.448614 0.678472 0.856538 1 1
196 0.088904 -0.480222 0.314568 0.011773 -0.871197 -0.254444 -1.214378 0.074859 0.805154 0.107568 -0.101697 0.725634 -0.674412 0 1
197 -0.819978 -0.214030 0.603520 -0.224844 0.008606 0.221715 0.248298 0.580332 0.548519 1.036227 0.084133 -0.973833 -1.118049 0 1
198 0.089868 -0.642488 -0.163873 0.162352 -2.220413 -1.138568 -0.479671 2.364939 0.560742 -0.314032 1.123286 0.586946 1.028865 0 1
199 0.490718 -0.570100 -0.601226 -1.373455 -2.219961 -1.917807 -1.874507 2.571947 0.227763 -1.470442 0.797456 0.390489 0.443397 0 1
200 -0.725311 0.995058 1.698440 -0.409174 1.345314 -0.371509 0.853418 0.950027 0.524004 -0.776995 0.384800 0.564620 0.242242 1 1
201 -0.088956 0.381477 0.665527 0.871194 0.436489 0.159443 0.297415 0.889031 -0.725496 -0.079337 0.812380 0.888138 0.315317 1 1
202 -0.627833 0.625737 0.574533 0.351258 0.679287 0.045521 -0.005976 -0.356564 -0.077959 -0.043215 1.410034 0.680198 0.118446 1 1
203 -0.591412 -1.691897 -1.280443 0.004164 0.029231 1.050215 1.339125 0.772476 -0.548882 -0.875977 -0.739724 -0.002660 0.550573 0 1
204 -0.213359 -0.655305 -0.545729 -0.525488 -0.788705 -0.715509 0.006099 0.426857 0.380788 0.708127 1.161262 0.582157 0.085247 0 1
205 0.561065 -0.270819 -0.376934 1.362033 -0.711445 1.354311 -0.206743 -0.142823 1.553667 -0.536125 -1.684840 0.688414 -1.009051 0 1
206 0.905511 0.711971 1.427482 0.086910 -0.261378 0.561744 0.222402 -1.622424 0.024138 0.934010 -0.468465 0.170884 0.221140 1 1
207 0.067531 0.386370 -0.307384 -0.040460 -0.049760 -0.032394 0.431908 1.042755 -1.011654 -0.412244 -0.108096 0.184730 -0.138904 0 1
208 -0.059758 0.045608 -0.194712 -0.038341 -0.248229 0.218675 -0.402325 0.773505 0.041528 0.766170 -0.042900 0.536066 0.856860 1 1
209 0.486749 -0.756717 -0.874808 -0.729025 -1.528664 0.066851 -0.217409 2.535472 0.810467 2.007519 -1.633543 1.078687 -1.214695 0 1
210 -0.009918 0.023157 -0.108599 -0.353482 -0.525150 0.026241 -0.209046 0.952549 0.118271 0.761473 -0.021901 0.723007 1.176221 1 1
211 -0.852239 -0.126721 1.768756 -0.139569 1.457419 -1.896514 0.722738 -1.858343 1.023542 0.337423 -1.696471 0.797698 -1.801833 1 1
212 -1.605282 0.546705 -0.027523 -0.007901 0.390982 0.752113 0.108134 -0.532402 -0.658558 -0.655673 -0.110552 -0.038507 0.564082 1 1
213 -1.537486 0.438542 -0.054954 -0.009054 0.565426 0.944990 0.000999 -0.699569 -0.616522 -0.546167 0.075944 0.000029 0.772172 1 1
214 0.478176 -0.623588 -1.163628 -0.024044 -0.377051 0.114672 -1.189664 -0.599743 0.064422 -0.284247 0.793914 0.752339 -0.558744 0 1
215 -0.653553 -0.272142 0.596156 0.881373 -2.295187 0.283720 -0.193981 -0.067370 0.777762 -0.959991 -0.275185 0.990175 -2.038870 0 1
216 -0.746791 -0.229040 0.929885 0.869993 -2.913181 0.212781 -0.305148 -0.195613 1.117807 -0.935236 -0.012361 1.696083 -2.096406 0 1
217 -0.402132 0.567649 0.658617 1.252447 0.282722 -0.867962 -0.658417 -0.711252 0.412558 0.018922 -0.656841 -0.668848 -0.606249 1 1
218 0.253416 2.028743 -0.249389 -0.135717 -0.432099 -1.127803 0.330577 -0.150249 1.400036 -0.237307 -1.036935 0.796314 0.315125 1 1
219 -0.268737 0.304053 0.442309 1.466913 -0.190859 -1.339833 -0.412332 -0.934782 -0.340795 -0.803146 -0.913412 -1.601519 -0.895268 1 1
220 0.164485 1.445490 2.674724 -0.663649 -0.232015 1.428702 0.961717 -2.600552 -0.356496 0.842619 2.796380 0.923058 1.962832 1 1
221 0.702551 0.697481 0.141117 -0.647568 0.265119 0.543185 0.967290 -0.204736 -0.388782 -1.297479 1.446403 1.404421 0.191658 1 1
222 0.727431 0.078441 -0.027658 -0.293281 -0.116893 0.762821 -0.474665 -0.142512 -0.107789 0.767739 -0.106817 -0.016677 0.681705 1 1
223 -1.601068 0.456897 -0.776221 -0.202831 0.972321 1.554034 1.293388 0.533103 -0.658778 -0.011963 -0.965611 1.297730 2.334936 1 1
224 -0.775289 -1.780714 -0.773207 -0.130797 -0.258296 0.465109 0.964189 -0.054493 -0.318554 -0.116591 0.490944 0.263715 0.162778 0 1

225 rows × 15 columns

In [185]:
# Contingency counts of chosen vs. cluster, drawn as a stacked bar chart
# to see whether either cluster is enriched in chosen songs.
cluster_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = cluster_counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df[[0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[185]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82f5fadd8>

Tonal Centroid

In [186]:
# Inspect the full column order to locate the tonal-centroid features
df_n_ps_std[0].columns
Out[186]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')
In [187]:
# Confirm that positions 34:40 are the six tonal-centroid columns
df_n_ps_std[0].columns[34:40]
Out[187]:
Index(['tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6'],
      dtype='object')
In [188]:
# Extract the six tonal-centroid columns (positions 34:40) for each company.
# The original wrapped the slice in pd.DataFrame(...) and then reassigned the
# same column labels — both redundant, since .iloc already returns a
# correctly-labelled DataFrame. .copy() decouples the slice from its source.
df_n_ps_std_tc = [None] * len(companies)
for i in range(len(companies)):
    df_n_ps_std_tc[i] = df_n_ps_std[i].iloc[:, 34:40].copy()
df_n_ps_std_tc[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 372 entries, 0 to 371
Data columns (total 6 columns):
tonalcentroidfiles_1    372 non-null float64
tonalcentroidfiles_2    372 non-null float64
tonalcentroidfiles_3    372 non-null float64
tonalcentroidfiles_4    372 non-null float64
tonalcentroidfiles_5    372 non-null float64
tonalcentroidfiles_6    372 non-null float64
dtypes: float64(6)
memory usage: 17.5 KB

Arte Francés

ANN

In [189]:
# Feature matrix: tonal-centroid features for company 0 (Arte Francés)
X = df_n_ps_std_tc[0]
In [190]:
# Binary target: whether the song was chosen (0/1)
y = df_n_ps[0]['chosen']
In [191]:
# Hold out the default 25% for validation. A fixed random_state makes the
# split — and everything trained on it — reproducible across kernel restarts
# (the original split changed on every run).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [192]:
# Sanity check: 279 training rows x 6 tonal-centroid features
X_train.shape
Out[192]:
(279, 6)
In [193]:
# Base estimator for the grid search; hidden_layer_sizes here is only a
# placeholder — the parameter grid below overrides it
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [194]:
# Hyperparameter candidate grids for the MLP search
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# batch_size grid defined but excluded from the search below (kept for reference)
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [195]:
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (epoch) — used to time the search

np.random.seed(1234)
# Search grid; batch_size deliberately commented out to keep the search tractable
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score with both Cohen's kappa and accuracy; refit the best model on accuracy
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24 —
# drop iid=True when upgrading scikit-learn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [196]:
# Run the exhaustive search (takes ~15 minutes per the logged output)
grid.fit(X_train, y_train)

# Report best hyperparameters with their cross-validated accuracy and kappa
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time right after the search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.02, 'max_iter': 2000}, que permiten obtener un Accuracy de 78.85% y un Kappa del 15.23
Tiempo total: 15.53 minutos
In [207]:
# Recover the architecture / training settings chosen by the grid search,
# to rebuild the same network in Keras below.
n0 = X_train.shape[1]  # number of input features

# Hidden-layer widths of the winning configuration, plus one output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [208]:
# Keras input layer sized to the feature count (n0 = 6 tonal-centroid features)
input_tensor = Input(shape = (n0,))
In [209]:
# Rebuild the winning MLP architecture in Keras, layer by layer.
# Use the activation selected by the grid search instead of hard-coding 'tanh'
# (the original silently diverged from the grid result whenever the best
# activation was not 'tanh'). sklearn's 'logistic' is named 'sigmoid' in
# Keras, hence the name mapping; 'relu' and 'tanh' match as-is.
best_activation = {'logistic': 'sigmoid'}.get(grid.best_params_['activation'],
                                              grid.best_params_['activation'])
hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=best_activation)(hidden_outputs[i]))

# Single sigmoid output unit for binary classification
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [210]:
# Functional-API model; snapshot the freshly initialized weights so training
# can later be restarted from the exact same initialization (set_weights below)
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [211]:
# Architecture overview: 6 -> 30 -> 30 -> 30 -> 1, 2,101 trainable parameters
model.summary()
Model: "model_8"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_8 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_24 (Dense)             (None, 30)                210       
_________________________________________________________________
dense_25 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_26 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_27 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,101
Trainable params: 2,101
Non-trainable params: 0
_________________________________________________________________
In [212]:
# Restore the saved initial weights so this cell is re-runnable from the same starting point
model.set_weights(weights)
# lr comes from the grid search. NOTE(review): the `lr` kwarg is deprecated in
# newer Keras/TF in favor of `learning_rate` — update when upgrading.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the grid-selected epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by 0.01 for 10 consecutive epochs
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 279 samples, validate on 93 samples
Epoch 1/2000
279/279 [==============================] - 0s 659us/step - loss: 0.6546 - accuracy: 0.6631 - val_loss: 0.6013 - val_accuracy: 0.7634
Epoch 2/2000
279/279 [==============================] - 0s 57us/step - loss: 0.5470 - accuracy: 0.7634 - val_loss: 0.5640 - val_accuracy: 0.7527
Epoch 3/2000
279/279 [==============================] - 0s 50us/step - loss: 0.5241 - accuracy: 0.7706 - val_loss: 0.5577 - val_accuracy: 0.7527
Epoch 4/2000
279/279 [==============================] - 0s 54us/step - loss: 0.5035 - accuracy: 0.7563 - val_loss: 0.5561 - val_accuracy: 0.7527
Epoch 5/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4957 - accuracy: 0.7706 - val_loss: 0.5495 - val_accuracy: 0.7527
Epoch 6/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4862 - accuracy: 0.7599 - val_loss: 0.5352 - val_accuracy: 0.7527
Epoch 7/2000
279/279 [==============================] - 0s 57us/step - loss: 0.4851 - accuracy: 0.7742 - val_loss: 0.5304 - val_accuracy: 0.7634
Epoch 8/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4595 - accuracy: 0.7778 - val_loss: 0.5112 - val_accuracy: 0.7419
Epoch 9/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4720 - accuracy: 0.7921 - val_loss: 0.5261 - val_accuracy: 0.7527
Epoch 10/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4575 - accuracy: 0.7957 - val_loss: 0.5074 - val_accuracy: 0.7527
Epoch 11/2000
279/279 [==============================] - 0s 50us/step - loss: 0.4290 - accuracy: 0.7921 - val_loss: 0.5122 - val_accuracy: 0.7419

Epoch 00011: ReduceLROnPlateau reducing learning rate to 0.009999999776482582.
Epoch 12/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3950 - accuracy: 0.8208 - val_loss: 0.5183 - val_accuracy: 0.7634
Epoch 13/2000
279/279 [==============================] - 0s 61us/step - loss: 0.3718 - accuracy: 0.8244 - val_loss: 0.4976 - val_accuracy: 0.7634
Epoch 14/2000
279/279 [==============================] - 0s 79us/step - loss: 0.3658 - accuracy: 0.8208 - val_loss: 0.5062 - val_accuracy: 0.7527
Epoch 15/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3475 - accuracy: 0.8459 - val_loss: 0.5192 - val_accuracy: 0.7419
Epoch 16/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3291 - accuracy: 0.8495 - val_loss: 0.5023 - val_accuracy: 0.7419
Epoch 17/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3115 - accuracy: 0.8566 - val_loss: 0.4928 - val_accuracy: 0.7634
Epoch 18/2000
279/279 [==============================] - 0s 64us/step - loss: 0.2897 - accuracy: 0.8674 - val_loss: 0.5426 - val_accuracy: 0.7957
Epoch 19/2000
279/279 [==============================] - 0s 61us/step - loss: 0.2844 - accuracy: 0.8853 - val_loss: 0.4936 - val_accuracy: 0.7957
Epoch 20/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2653 - accuracy: 0.8746 - val_loss: 0.5283 - val_accuracy: 0.7419
Epoch 21/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2578 - accuracy: 0.8889 - val_loss: 0.5283 - val_accuracy: 0.7849
Epoch 22/2000
279/279 [==============================] - 0s 72us/step - loss: 0.2388 - accuracy: 0.9032 - val_loss: 0.5246 - val_accuracy: 0.8065
Epoch 23/2000
279/279 [==============================] - 0s 72us/step - loss: 0.2229 - accuracy: 0.9140 - val_loss: 0.5452 - val_accuracy: 0.7527
Epoch 24/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2068 - accuracy: 0.9247 - val_loss: 0.5425 - val_accuracy: 0.7742
Epoch 25/2000
279/279 [==============================] - 0s 75us/step - loss: 0.1910 - accuracy: 0.9247 - val_loss: 0.6091 - val_accuracy: 0.7742
Epoch 26/2000
279/279 [==============================] - 0s 64us/step - loss: 0.1861 - accuracy: 0.9104 - val_loss: 0.5379 - val_accuracy: 0.7742
Epoch 27/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1750 - accuracy: 0.9319 - val_loss: 0.6090 - val_accuracy: 0.7634
Epoch 28/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1684 - accuracy: 0.9355 - val_loss: 0.6083 - val_accuracy: 0.7419
Epoch 29/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1458 - accuracy: 0.9391 - val_loss: 0.6428 - val_accuracy: 0.7742
Epoch 30/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1362 - accuracy: 0.9355 - val_loss: 0.6483 - val_accuracy: 0.7527
Epoch 31/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1184 - accuracy: 0.9570 - val_loss: 0.6489 - val_accuracy: 0.7312
Epoch 32/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1156 - accuracy: 0.9570 - val_loss: 0.7299 - val_accuracy: 0.7419

Epoch 00032: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 33/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0957 - accuracy: 0.9677 - val_loss: 0.7016 - val_accuracy: 0.7419
Epoch 34/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0896 - accuracy: 0.9821 - val_loss: 0.6892 - val_accuracy: 0.7742
Epoch 35/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0822 - accuracy: 0.9857 - val_loss: 0.7386 - val_accuracy: 0.7527
Epoch 36/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0810 - accuracy: 0.9892 - val_loss: 0.7440 - val_accuracy: 0.7634
Epoch 37/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0737 - accuracy: 0.9892 - val_loss: 0.7301 - val_accuracy: 0.7634
Epoch 38/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0697 - accuracy: 0.9928 - val_loss: 0.7536 - val_accuracy: 0.7742
Epoch 39/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0679 - accuracy: 0.9928 - val_loss: 0.7519 - val_accuracy: 0.7634
Epoch 40/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0633 - accuracy: 0.9892 - val_loss: 0.7982 - val_accuracy: 0.7527
Epoch 41/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0619 - accuracy: 0.9892 - val_loss: 0.7953 - val_accuracy: 0.7742
Epoch 42/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0591 - accuracy: 0.9928 - val_loss: 0.7795 - val_accuracy: 0.7634

Epoch 00042: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 43/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0544 - accuracy: 0.9892 - val_loss: 0.7961 - val_accuracy: 0.7634
Epoch 44/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0498 - accuracy: 0.9928 - val_loss: 0.8283 - val_accuracy: 0.7634
Epoch 45/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0503 - accuracy: 0.9928 - val_loss: 0.8277 - val_accuracy: 0.7849
Epoch 46/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0482 - accuracy: 0.9928 - val_loss: 0.8270 - val_accuracy: 0.7634
Epoch 47/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0465 - accuracy: 0.9928 - val_loss: 0.8391 - val_accuracy: 0.7634
Epoch 48/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0459 - accuracy: 0.9964 - val_loss: 0.8561 - val_accuracy: 0.7419
Epoch 49/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0442 - accuracy: 0.9928 - val_loss: 0.8438 - val_accuracy: 0.7634
Epoch 50/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0435 - accuracy: 0.9964 - val_loss: 0.8739 - val_accuracy: 0.7419
Epoch 51/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0407 - accuracy: 0.9964 - val_loss: 0.8670 - val_accuracy: 0.7634
Epoch 52/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0411 - accuracy: 0.9928 - val_loss: 0.8566 - val_accuracy: 0.7527

Epoch 00052: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 53/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0384 - accuracy: 0.9928 - val_loss: 0.8780 - val_accuracy: 0.7634
Epoch 54/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0374 - accuracy: 0.9964 - val_loss: 0.8862 - val_accuracy: 0.7634
Epoch 55/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0365 - accuracy: 0.9964 - val_loss: 0.8844 - val_accuracy: 0.7634
Epoch 56/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0360 - accuracy: 0.9964 - val_loss: 0.8884 - val_accuracy: 0.7634
Epoch 57/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0353 - accuracy: 0.9964 - val_loss: 0.8878 - val_accuracy: 0.7527
Epoch 58/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0349 - accuracy: 0.9964 - val_loss: 0.8975 - val_accuracy: 0.7527
Epoch 59/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0347 - accuracy: 0.9964 - val_loss: 0.8923 - val_accuracy: 0.7634
Epoch 60/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0337 - accuracy: 0.9964 - val_loss: 0.8985 - val_accuracy: 0.7742
Epoch 61/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0331 - accuracy: 0.9964 - val_loss: 0.9074 - val_accuracy: 0.7742
Epoch 62/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0325 - accuracy: 0.9964 - val_loss: 0.9097 - val_accuracy: 0.7742

Epoch 00062: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 63/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0319 - accuracy: 0.9964 - val_loss: 0.9112 - val_accuracy: 0.7634
Epoch 64/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0316 - accuracy: 0.9964 - val_loss: 0.9168 - val_accuracy: 0.7634
Epoch 65/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0313 - accuracy: 0.9964 - val_loss: 0.9171 - val_accuracy: 0.7634
Epoch 66/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0310 - accuracy: 0.9964 - val_loss: 0.9149 - val_accuracy: 0.7634
Epoch 67/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0306 - accuracy: 0.9964 - val_loss: 0.9193 - val_accuracy: 0.7634
Epoch 68/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0304 - accuracy: 0.9964 - val_loss: 0.9232 - val_accuracy: 0.7634
Epoch 69/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0302 - accuracy: 0.9964 - val_loss: 0.9213 - val_accuracy: 0.7634
Epoch 70/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0299 - accuracy: 0.9964 - val_loss: 0.9208 - val_accuracy: 0.7634
Epoch 71/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0296 - accuracy: 0.9964 - val_loss: 0.9229 - val_accuracy: 0.7634
Epoch 72/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0293 - accuracy: 0.9964 - val_loss: 0.9256 - val_accuracy: 0.7634

Epoch 00072: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 73/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0290 - accuracy: 0.9964 - val_loss: 0.9282 - val_accuracy: 0.7634
Epoch 74/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0289 - accuracy: 0.9964 - val_loss: 0.9280 - val_accuracy: 0.7634
Epoch 75/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0287 - accuracy: 0.9964 - val_loss: 0.9288 - val_accuracy: 0.7634
Epoch 76/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0285 - accuracy: 0.9964 - val_loss: 0.9293 - val_accuracy: 0.7634
Epoch 77/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0285 - accuracy: 0.9964 - val_loss: 0.9310 - val_accuracy: 0.7634
Epoch 78/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0283 - accuracy: 0.9964 - val_loss: 0.9331 - val_accuracy: 0.7634
Epoch 79/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0282 - accuracy: 0.9964 - val_loss: 0.9335 - val_accuracy: 0.7634
Epoch 80/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0280 - accuracy: 1.0000 - val_loss: 0.9355 - val_accuracy: 0.7634
Epoch 81/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0279 - accuracy: 1.0000 - val_loss: 0.9377 - val_accuracy: 0.7634
Epoch 82/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0278 - accuracy: 1.0000 - val_loss: 0.9380 - val_accuracy: 0.7634

Epoch 00082: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 83/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0276 - accuracy: 1.0000 - val_loss: 0.9395 - val_accuracy: 0.7634
Epoch 84/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0276 - accuracy: 1.0000 - val_loss: 0.9393 - val_accuracy: 0.7634
Epoch 85/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0275 - accuracy: 1.0000 - val_loss: 0.9401 - val_accuracy: 0.7634
Epoch 86/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0274 - accuracy: 1.0000 - val_loss: 0.9402 - val_accuracy: 0.7634
Epoch 87/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0273 - accuracy: 1.0000 - val_loss: 0.9403 - val_accuracy: 0.7634
Epoch 88/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0272 - accuracy: 1.0000 - val_loss: 0.9407 - val_accuracy: 0.7634
Epoch 89/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0272 - accuracy: 1.0000 - val_loss: 0.9410 - val_accuracy: 0.7634
Epoch 90/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0271 - accuracy: 1.0000 - val_loss: 0.9408 - val_accuracy: 0.7634
Epoch 91/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0271 - accuracy: 1.0000 - val_loss: 0.9427 - val_accuracy: 0.7634
Epoch 92/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0270 - accuracy: 1.0000 - val_loss: 0.9434 - val_accuracy: 0.7634

Epoch 00092: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 93/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0269 - accuracy: 1.0000 - val_loss: 0.9435 - val_accuracy: 0.7634
Epoch 94/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0269 - accuracy: 1.0000 - val_loss: 0.9433 - val_accuracy: 0.7634
Epoch 95/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9434 - val_accuracy: 0.7634
Epoch 96/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9432 - val_accuracy: 0.7634
Epoch 97/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9438 - val_accuracy: 0.7634
Epoch 98/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9441 - val_accuracy: 0.7634
Epoch 99/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9440 - val_accuracy: 0.7634
Epoch 100/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9444 - val_accuracy: 0.7634
Epoch 101/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0266 - accuracy: 1.0000 - val_loss: 0.9454 - val_accuracy: 0.7634
Epoch 102/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0266 - accuracy: 1.0000 - val_loss: 0.9450 - val_accuracy: 0.7634

Epoch 00102: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 103/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9450 - val_accuracy: 0.7634
Epoch 104/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9451 - val_accuracy: 0.7634
Epoch 105/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9453 - val_accuracy: 0.7634
Epoch 106/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9455 - val_accuracy: 0.7634
Epoch 107/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9456 - val_accuracy: 0.7634
Epoch 108/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9458 - val_accuracy: 0.7634
Epoch 109/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9461 - val_accuracy: 0.7634
Epoch 110/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9462 - val_accuracy: 0.7634
Epoch 111/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9465 - val_accuracy: 0.7634
Epoch 112/2000
279/279 [==============================] - ETA: 0s - loss: 0.0185 - accuracy: 1.00 - 0s 57us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9466 - val_accuracy: 0.7634

Epoch 00112: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 113/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 114/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 115/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 116/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 117/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 118/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 119/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9470 - val_accuracy: 0.7634
Epoch 120/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9471 - val_accuracy: 0.7634
Epoch 121/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9472 - val_accuracy: 0.7634
Epoch 122/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634

Epoch 00122: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 123/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634
Epoch 124/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634
Epoch 125/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 126/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 127/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 128/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9475 - val_accuracy: 0.7634
Epoch 129/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9475 - val_accuracy: 0.7634
Epoch 130/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634
Epoch 131/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634
Epoch 132/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634

Epoch 00132: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 133/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 134/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 135/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 136/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 137/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 138/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 139/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 140/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 141/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 142/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634

Epoch 00142: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 143/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 144/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 145/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 146/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 147/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 148/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 149/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 150/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 151/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 152/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634

Epoch 00152: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 153/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 154/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 155/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 156/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 157/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 158/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 159/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 160/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 161/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 162/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00162: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 163/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 164/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 165/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 166/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 167/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 168/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 169/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 170/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 171/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 172/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00172: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 173/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 174/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 175/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 176/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 177/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 178/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 179/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 180/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 181/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 182/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00182: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 183/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 184/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 185/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 186/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 187/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 188/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 189/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 190/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 191/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 192/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00192: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 193/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 194/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 195/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 196/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 197/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 198/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 199/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 200/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 201/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 202/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00202: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 203/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 204/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 205/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 206/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 207/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 208/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 209/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 210/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 211/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 212/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00212: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 213/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 214/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 215/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 216/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 217/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 218/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 219/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 220/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 221/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 222/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00222: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 223/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 224/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 225/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 226/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 227/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 228/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 229/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 230/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 231/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 232/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00232: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 233/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 234/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 235/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 236/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 237/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 238/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 239/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 240/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 241/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 242/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00242: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 243/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 244/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 245/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 246/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 247/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 248/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 249/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 250/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 251/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 252/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00252: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 253/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 254/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 255/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 256/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 257/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 258/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 259/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 260/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 261/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 262/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00262: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 263/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 264/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 265/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 266/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 267/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 268/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 269/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 270/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 271/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 272/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00272: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 273/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 274/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 275/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 276/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 277/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 278/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 279/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 280/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 281/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 282/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00282: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 283/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 284/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 285/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 286/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 287/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 288/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 289/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 290/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 291/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 292/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00292: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 293/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 294/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 295/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 296/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 297/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 298/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 299/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 300/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 301/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 302/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00302: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 303/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 304/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 305/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 306/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 307/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 308/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 309/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 310/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 311/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 312/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00312: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 313/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 314/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 315/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 316/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 317/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 318/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 319/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 320/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 321/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 322/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00322: ReduceLROnPlateau reducing learning rate to 9.313225537987968e-12.
Epoch 323/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 324/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 325/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 326/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 327/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 328/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 329/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 330/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 331/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 332/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00332: ReduceLROnPlateau reducing learning rate to 4.656612768993984e-12.
Epoch 333/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 334/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 335/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 336/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 337/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 338/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 339/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 340/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 341/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 342/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00342: ReduceLROnPlateau reducing learning rate to 2.328306384496992e-12.
Epoch 343/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 344/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 345/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 346/2000
279/279 [==============================] - 0s 168us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 347/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 348/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 349/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 350/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 351/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 352/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00352: ReduceLROnPlateau reducing learning rate to 1.164153192248496e-12.
Epoch 353/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 354/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 355/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 356/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 357/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 358/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 359/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 360/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 361/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 362/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00362: ReduceLROnPlateau reducing learning rate to 5.82076596124248e-13.
Epoch 363/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 364/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 365/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 366/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 367/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 368/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 369/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 370/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 371/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 372/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00372: ReduceLROnPlateau reducing learning rate to 2.91038298062124e-13.
Epoch 373/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 374/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 375/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 376/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 377/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 378/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 379/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 380/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 381/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 382/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00382: ReduceLROnPlateau reducing learning rate to 1.45519149031062e-13.
Epoch 383/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 384/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 385/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 386/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 387/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 388/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 389/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 390/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 391/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 392/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00392: ReduceLROnPlateau reducing learning rate to 7.2759574515531e-14.
Epoch 393/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 394/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 395/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 396/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 397/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 398/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 399/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 400/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 401/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 402/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00402: ReduceLROnPlateau reducing learning rate to 3.63797872577655e-14.
Epoch 403/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 404/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 405/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 406/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 407/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 408/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 409/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 410/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 411/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 412/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00412: ReduceLROnPlateau reducing learning rate to 1.818989362888275e-14.
Epoch 413/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 414/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 415/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 416/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 417/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 418/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 419/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 420/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 421/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 422/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00422: ReduceLROnPlateau reducing learning rate to 9.094946814441375e-15.
Epoch 423/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 424/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 425/2000
279/279 [==============================] - 0s 65us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 426/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 427/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 428/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 429/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 430/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 431/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 432/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00432: ReduceLROnPlateau reducing learning rate to 4.5474734072206875e-15.
Epoch 433/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 434/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 435/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 436/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 437/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 438/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 439/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 440/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 441/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 442/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00442: ReduceLROnPlateau reducing learning rate to 2.2737367036103438e-15.
Epoch 443/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 444/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 445/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 446/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 447/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 448/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 449/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 450/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 451/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 452/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00452: ReduceLROnPlateau reducing learning rate to 1.1368683518051719e-15.
Epoch 453/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 454/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 455/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 456/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 457/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 458/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 459/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 460/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 461/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 462/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00462: ReduceLROnPlateau reducing learning rate to 5.684341759025859e-16.
Epoch 463/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 464/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 465/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 466/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 467/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 468/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 469/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 470/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 471/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 472/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00472: ReduceLROnPlateau reducing learning rate to 2.8421708795129297e-16.
Epoch 473/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 474/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 475/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 476/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 477/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 478/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 479/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 480/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 481/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 482/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00482: ReduceLROnPlateau reducing learning rate to 1.4210854397564648e-16.
Epoch 483/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 484/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 485/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 486/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 487/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 488/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 489/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 490/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 491/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 492/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00492: ReduceLROnPlateau reducing learning rate to 7.105427198782324e-17.
Epoch 493/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 494/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 495/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 496/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 497/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 498/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 499/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 500/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 501/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 502/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00502: ReduceLROnPlateau reducing learning rate to 3.552713599391162e-17.
Epoch 503/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 504/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 505/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 506/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 507/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 508/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 509/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 510/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 511/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 512/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00512: ReduceLROnPlateau reducing learning rate to 1.776356799695581e-17.
Epoch 513/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 514/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 515/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 516/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 517/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 518/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 519/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 520/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 521/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 522/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00522: ReduceLROnPlateau reducing learning rate to 8.881783998477905e-18.
Epoch 523/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 524/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 525/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 526/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 527/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 528/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 529/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 530/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 531/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 532/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00532: ReduceLROnPlateau reducing learning rate to 4.440891999238953e-18.
Epoch 533/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 534/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 535/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 536/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 537/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 538/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 539/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 540/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 541/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 542/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00542: ReduceLROnPlateau reducing learning rate to 2.2204459996194763e-18.
Epoch 543/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 544/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 545/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 546/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 547/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 548/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 549/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 550/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 551/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 552/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00552: ReduceLROnPlateau reducing learning rate to 1.1102229998097382e-18.
Epoch 553/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 554/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 555/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 556/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 557/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 558/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 559/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 560/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 561/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 562/2000
279/279 [==============================] - ETA: 0s - loss: 0.0241 - accuracy: 1.00 - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00562: ReduceLROnPlateau reducing learning rate to 5.551114999048691e-19.
Epoch 563/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 564/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 565/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 566/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 567/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 568/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 569/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 570/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 571/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 572/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00572: ReduceLROnPlateau reducing learning rate to 2.7755574995243454e-19.
Epoch 573/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 574/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 575/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 576/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 577/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 578/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 579/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 580/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 581/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 582/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00582: ReduceLROnPlateau reducing learning rate to 1.3877787497621727e-19.
Epoch 583/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 584/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 585/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 586/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 587/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 588/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 589/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 590/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 591/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 592/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00592: ReduceLROnPlateau reducing learning rate to 6.938893748810864e-20.
Epoch 593/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 594/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 595/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 596/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 597/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 598/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 599/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 600/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 601/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 602/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00602: ReduceLROnPlateau reducing learning rate to 3.469446874405432e-20.
Epoch 603/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 604/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 605/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 606/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 607/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 608/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 609/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 610/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 611/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 612/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00612: ReduceLROnPlateau reducing learning rate to 1.734723437202716e-20.
Epoch 613/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 614/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 615/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 616/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 617/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 618/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 619/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 620/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 621/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 622/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00622: ReduceLROnPlateau reducing learning rate to 8.67361718601358e-21.
Epoch 623/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 624/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 625/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 626/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 627/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 628/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 629/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 630/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 631/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 632/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00632: ReduceLROnPlateau reducing learning rate to 4.33680859300679e-21.
Epoch 633/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 634/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 635/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 636/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 637/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 638/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 639/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 640/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 641/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 642/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00642: ReduceLROnPlateau reducing learning rate to 2.168404296503395e-21.
Epoch 643/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 644/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 645/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 646/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 647/2000
279/279 [==============================] - ETA: 0s - loss: 0.0232 - accuracy: 1.00 - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 648/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 649/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 650/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 651/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 652/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00652: ReduceLROnPlateau reducing learning rate to 1.0842021482516974e-21.
Epoch 653/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 654/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 655/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 656/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 657/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 658/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 659/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 660/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 661/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 662/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00662: ReduceLROnPlateau reducing learning rate to 5.421010741258487e-22.
Epoch 663/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 664/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 665/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 666/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 667/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 668/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 669/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 670/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 671/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 672/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00672: ReduceLROnPlateau reducing learning rate to 2.7105053706292436e-22.
Epoch 673/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 674/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 675/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 676/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 677/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 678/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 679/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 680/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 681/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 682/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00682: ReduceLROnPlateau reducing learning rate to 1.3552526853146218e-22.
Epoch 683/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 684/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 685/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 686/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 687/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 688/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 689/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 690/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 691/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 692/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00692: ReduceLROnPlateau reducing learning rate to 6.776263426573109e-23.
Epoch 693/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 694/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 695/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 696/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 697/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 698/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 699/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 700/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 701/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 702/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00702: ReduceLROnPlateau reducing learning rate to 3.3881317132865545e-23.
Epoch 703/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 704/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 705/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 706/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 707/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 708/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 709/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 710/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 711/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 712/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00712: ReduceLROnPlateau reducing learning rate to 1.6940658566432772e-23.
Epoch 713/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 714/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 715/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 716/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 717/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 718/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 719/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 720/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 721/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 722/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00722: ReduceLROnPlateau reducing learning rate to 8.470329283216386e-24.
Epoch 723/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 724/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 725/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 726/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 727/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 728/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 729/2000
279/279 [==============================] - ETA: 0s - loss: 0.0332 - accuracy: 1.00 - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 730/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 731/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 732/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00732: ReduceLROnPlateau reducing learning rate to 4.235164641608193e-24.
Epoch 733/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 734/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 735/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 736/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 737/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 738/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 739/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 740/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 741/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 742/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00742: ReduceLROnPlateau reducing learning rate to 2.1175823208040965e-24.
Epoch 743/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 744/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 745/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 746/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 747/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 748/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 749/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 750/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 751/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 752/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00752: ReduceLROnPlateau reducing learning rate to 1.0587911604020483e-24.
Epoch 753/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 754/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 755/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 756/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 757/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 758/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 759/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 760/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 761/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 762/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00762: ReduceLROnPlateau reducing learning rate to 5.293955802010241e-25.
Epoch 763/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 764/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 765/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 766/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 767/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 768/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 769/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 770/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 771/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 772/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00772: ReduceLROnPlateau reducing learning rate to 2.6469779010051207e-25.
Epoch 773/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 774/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 775/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 776/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 777/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 778/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 779/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 780/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 781/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 782/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00782: ReduceLROnPlateau reducing learning rate to 1.3234889505025603e-25.
Epoch 783/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 784/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 785/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 786/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 787/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 788/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 789/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 790/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 791/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 792/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00792: ReduceLROnPlateau reducing learning rate to 6.617444752512802e-26.
Epoch 793/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 794/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 795/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 796/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 797/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 798/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 799/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 800/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 801/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 802/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00802: ReduceLROnPlateau reducing learning rate to 3.308722376256401e-26.
Epoch 803/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 804/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 805/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 806/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 807/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 808/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 809/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 810/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 811/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 812/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00812: ReduceLROnPlateau reducing learning rate to 1.6543611881282004e-26.
Epoch 813/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 814/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 815/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 816/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 817/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 818/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 819/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 820/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 821/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 822/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00822: ReduceLROnPlateau reducing learning rate to 8.271805940641002e-27.
Epoch 823/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 824/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 825/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 826/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 827/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 828/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 829/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 830/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 831/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 832/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00832: ReduceLROnPlateau reducing learning rate to 4.135902970320501e-27.
Epoch 833/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 834/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 835/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 836/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 837/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 838/2000
279/279 [==============================] - ETA: 0s - loss: 0.0285 - accuracy: 1.00 - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 839/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 840/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 841/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 842/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00842: ReduceLROnPlateau reducing learning rate to 2.0679514851602505e-27.
Epoch 843/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 844/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 845/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 846/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 847/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 848/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 849/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 850/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 851/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 852/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00852: ReduceLROnPlateau reducing learning rate to 1.0339757425801253e-27.
Epoch 853/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 854/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 855/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 856/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 857/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 858/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 859/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 860/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 861/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 862/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00862: ReduceLROnPlateau reducing learning rate to 5.169878712900626e-28.
Epoch 863/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 864/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 865/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 866/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 867/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 868/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 869/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 870/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 871/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 872/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00872: ReduceLROnPlateau reducing learning rate to 2.584939356450313e-28.
Epoch 873/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 874/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 875/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 876/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 877/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 878/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 879/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 880/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 881/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 882/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00882: ReduceLROnPlateau reducing learning rate to 1.2924696782251566e-28.
Epoch 883/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 884/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 885/2000
279/279 [==============================] - ETA: 0s - loss: 0.0114 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 886/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 887/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 888/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 889/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 890/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 891/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 892/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00892: ReduceLROnPlateau reducing learning rate to 6.462348391125783e-29.
Epoch 893/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 894/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 895/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 896/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 897/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 898/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 899/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 900/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 901/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 902/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00902: ReduceLROnPlateau reducing learning rate to 3.2311741955628914e-29.
Epoch 903/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 904/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 905/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 906/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 907/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 908/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 909/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 910/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 911/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 912/2000
279/279 [==============================] - 0s 129us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00912: ReduceLROnPlateau reducing learning rate to 1.6155870977814457e-29.
Epoch 913/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 914/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 915/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 916/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 917/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 918/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 919/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 920/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 921/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 922/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00922: ReduceLROnPlateau reducing learning rate to 8.077935488907229e-30.
Epoch 923/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 924/2000
279/279 [==============================] - ETA: 0s - loss: 0.0248 - accuracy: 1.00 - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 925/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 926/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 927/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 928/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 929/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 930/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 931/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 932/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00932: ReduceLROnPlateau reducing learning rate to 4.038967744453614e-30.
Epoch 933/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 934/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 935/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 936/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 937/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 938/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 939/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 940/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 941/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 942/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00942: ReduceLROnPlateau reducing learning rate to 2.019483872226807e-30.
Epoch 943/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 944/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 945/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 946/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 947/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 948/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 949/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 950/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 951/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 952/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00952: ReduceLROnPlateau reducing learning rate to 1.0097419361134036e-30.
Epoch 953/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 954/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 955/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 956/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 957/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 958/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 959/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 960/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 961/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 962/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00962: ReduceLROnPlateau reducing learning rate to 5.048709680567018e-31.
Epoch 963/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 964/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 965/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 966/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 967/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 968/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 969/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 970/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 971/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 972/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00972: ReduceLROnPlateau reducing learning rate to 2.524354840283509e-31.
Epoch 973/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 974/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 975/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 976/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 977/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 978/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 979/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 980/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 981/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 982/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00982: ReduceLROnPlateau reducing learning rate to 1.2621774201417545e-31.
Epoch 983/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 984/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 985/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 986/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 987/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 988/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 989/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 990/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 991/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 992/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00992: ReduceLROnPlateau reducing learning rate to 6.310887100708772e-32.
Epoch 993/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 994/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 995/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 996/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 997/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 998/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 999/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1000/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1001/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1002/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01002: ReduceLROnPlateau reducing learning rate to 3.155443550354386e-32.
Epoch 1003/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1004/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1005/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1006/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1007/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1008/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1009/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1010/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1011/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1012/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01012: ReduceLROnPlateau reducing learning rate to 1.577721775177193e-32.
Epoch 1013/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1014/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1015/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1016/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1017/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1018/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1019/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1020/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1021/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1022/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01022: ReduceLROnPlateau reducing learning rate to 7.888608875885965e-33.
Epoch 1023/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1024/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1025/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1026/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1027/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1028/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1029/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1030/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1031/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1032/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01032: ReduceLROnPlateau reducing learning rate to 3.944304437942983e-33.
Epoch 1033/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1034/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1035/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1036/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1037/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1038/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1039/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1040/2000
279/279 [==============================] - 0s 290us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1041/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1042/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01042: ReduceLROnPlateau reducing learning rate to 1.9721522189714914e-33.
Epoch 1043/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1044/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1045/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1046/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1047/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1048/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1049/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1050/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1051/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1052/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01052: ReduceLROnPlateau reducing learning rate to 9.860761094857457e-34.
Epoch 1053/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1054/2000
279/279 [==============================] - 0s 143us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1055/2000
279/279 [==============================] - ETA: 0s - loss: 0.0338 - accuracy: 1.00 - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1056/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1057/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1058/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1059/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1060/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1061/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1062/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01062: ReduceLROnPlateau reducing learning rate to 4.930380547428728e-34.
Epoch 1063/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1064/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1065/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1066/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1067/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1068/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1069/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1070/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1071/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1072/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01072: ReduceLROnPlateau reducing learning rate to 2.465190273714364e-34.
Epoch 1073/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1074/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1075/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1076/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1077/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1078/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1079/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1080/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1081/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1082/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01082: ReduceLROnPlateau reducing learning rate to 1.232595136857182e-34.
Epoch 1083/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1084/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1085/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1086/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1087/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1088/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1089/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1090/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1091/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1092/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01092: ReduceLROnPlateau reducing learning rate to 6.16297568428591e-35.
Epoch 1093/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1094/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1095/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1096/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1097/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1098/2000
279/279 [==============================] - ETA: 0s - loss: 0.0289 - accuracy: 1.00 - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1099/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1100/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1101/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1102/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01102: ReduceLROnPlateau reducing learning rate to 3.081487842142955e-35.
Epoch 1103/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1104/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1105/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1106/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1107/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1108/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1109/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1110/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1111/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1112/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01112: ReduceLROnPlateau reducing learning rate to 1.5407439210714776e-35.
Epoch 1113/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1114/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1115/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1116/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1117/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1118/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1119/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1120/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1121/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1122/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01122: ReduceLROnPlateau reducing learning rate to 7.703719605357388e-36.
Epoch 1123/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1124/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1125/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1126/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1127/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1128/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1129/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1130/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1131/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1132/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01132: ReduceLROnPlateau reducing learning rate to 3.851859802678694e-36.
Epoch 1133/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1134/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1135/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1136/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1137/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1138/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1139/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1140/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1141/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1142/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01142: ReduceLROnPlateau reducing learning rate to 1.925929901339347e-36.
Epoch 1143/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1144/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1145/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1146/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1147/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1148/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1149/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1150/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1151/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1152/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01152: ReduceLROnPlateau reducing learning rate to 9.629649506696735e-37.
Epoch 1153/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1154/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1155/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1156/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1157/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1158/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1159/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1160/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1161/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1162/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01162: ReduceLROnPlateau reducing learning rate to 4.8148247533483676e-37.
Epoch 1163/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1164/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1165/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1166/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1167/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1168/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1169/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1170/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1171/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1172/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01172: ReduceLROnPlateau reducing learning rate to 2.4074123766741838e-37.
Epoch 1173/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1174/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1175/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1176/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1177/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1178/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1179/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1180/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1181/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1182/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01182: ReduceLROnPlateau reducing learning rate to 1.2037061883370919e-37.
Epoch 1183/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1184/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1185/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1186/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1187/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1188/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1189/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1190/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1191/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1192/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01192: ReduceLROnPlateau reducing learning rate to 6.018530941685459e-38.
Epoch 1193/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1194/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1195/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1196/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1197/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1198/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1199/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1200/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1201/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1202/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01202: ReduceLROnPlateau reducing learning rate to 3.0092654708427297e-38.
Epoch 1203/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1204/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1205/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1206/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1207/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1208/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1209/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1210/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1211/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1212/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01212: ReduceLROnPlateau reducing learning rate to 1.5046327354213649e-38.
Epoch 1213/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1214/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1215/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1216/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1217/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1218/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1219/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1220/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1221/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1222/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01222: ReduceLROnPlateau reducing learning rate to 7.523163677106824e-39.
Epoch 1223/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1224/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1225/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1226/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1227/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1228/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1229/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1230/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1231/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1232/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01232: ReduceLROnPlateau reducing learning rate to 3.761581838553412e-39.
Epoch 1233/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1234/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1235/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1236/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1237/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1238/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1239/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1240/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1241/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1242/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01242: ReduceLROnPlateau reducing learning rate to 1.88079056895209e-39.
Epoch 1243/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1244/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1245/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1246/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1247/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1248/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1249/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1250/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1251/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1252/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01252: ReduceLROnPlateau reducing learning rate to 9.40395284476045e-40.
Epoch 1253/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1254/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1255/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1256/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1257/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1258/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1259/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1260/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1261/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1262/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01262: ReduceLROnPlateau reducing learning rate to 4.701972919134064e-40.
Epoch 1263/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1264/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1265/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1266/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1267/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1268/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1269/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1270/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1271/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1272/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01272: ReduceLROnPlateau reducing learning rate to 2.350986459567032e-40.
Epoch 1273/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1274/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1275/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1276/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1277/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1278/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1279/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1280/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1281/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1282/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01282: ReduceLROnPlateau reducing learning rate to 1.175493229783516e-40.
Epoch 1283/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1284/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1285/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1286/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1287/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1288/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1289/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1290/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1291/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1292/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01292: ReduceLROnPlateau reducing learning rate to 5.87746614891758e-41.
Epoch 1293/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1294/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1295/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1296/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1297/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1298/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1299/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1300/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1301/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1302/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01302: ReduceLROnPlateau reducing learning rate to 2.93873307445879e-41.
Epoch 1303/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1304/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1305/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1306/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1307/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1308/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1309/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1310/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1311/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1312/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01312: ReduceLROnPlateau reducing learning rate to 1.4694015696910032e-41.
Epoch 1313/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1314/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1315/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1316/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1317/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1318/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1319/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1320/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1321/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1322/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01322: ReduceLROnPlateau reducing learning rate to 7.347007848455016e-42.
Epoch 1323/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1324/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1325/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1326/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1327/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1328/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1329/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1330/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1331/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1332/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01332: ReduceLROnPlateau reducing learning rate to 3.673503924227508e-42.
Epoch 1333/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1334/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1335/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1336/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1337/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1338/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1339/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1340/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1341/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1342/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01342: ReduceLROnPlateau reducing learning rate to 1.8371022867298352e-42.
Epoch 1343/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1344/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1345/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1346/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1347/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1348/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1349/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1350/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1351/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1352/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01352: ReduceLROnPlateau reducing learning rate to 9.185511433649176e-43.
Epoch 1353/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1354/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1355/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1356/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1357/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1358/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1359/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1360/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1361/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1362/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01362: ReduceLROnPlateau reducing learning rate to 4.5962589629854e-43.
Epoch 1363/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1364/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1365/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1366/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1367/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1368/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1369/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1370/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1371/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1372/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01372: ReduceLROnPlateau reducing learning rate to 2.2981294814927e-43.
Epoch 1373/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1374/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1375/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1376/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1377/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1378/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1379/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1380/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1381/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1382/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01382: ReduceLROnPlateau reducing learning rate to 1.14906474074635e-43.
Epoch 1383/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1384/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1385/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1386/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1387/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1388/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1389/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1390/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1391/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1392/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01392: ReduceLROnPlateau reducing learning rate to 5.74532370373175e-44.
Epoch 1393/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1394/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1395/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1396/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1397/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1398/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1399/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1400/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1401/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1402/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01402: ReduceLROnPlateau reducing learning rate to 2.872661851865875e-44.
Epoch 1403/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1404/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1405/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1406/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1407/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1408/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1409/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1410/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1411/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1412/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01412: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-44.
Epoch 1413/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1414/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1415/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1416/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1417/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1418/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1419/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1420/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1421/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1422/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01422: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-45.
Epoch 1423/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1424/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1425/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1426/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1427/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1428/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1429/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1430/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1431/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1432/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01432: ReduceLROnPlateau reducing learning rate to 3.5032461608120427e-45.
Epoch 1433/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1434/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1435/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1436/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1437/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1438/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1439/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1440/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1441/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1442/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01442: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1443/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1444/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1445/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1446/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1447/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1448/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1449/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1450/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1451/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1452/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01452: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1453/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1454/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1455/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1456/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1457/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1458/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1459/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1460/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1461/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1462/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1463/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1464/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1465/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1466/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1467/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1468/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1469/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1470/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1471/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1472/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1473/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1474/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1475/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1476/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1477/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1478/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1479/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1480/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1481/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1482/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1483/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1484/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1485/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1486/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1487/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1488/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1489/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1490/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1491/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1492/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1493/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1494/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1495/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1496/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1497/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1498/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1499/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1500/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1501/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1502/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1503/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1504/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1505/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1506/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1507/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1508/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1509/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1510/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1511/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1512/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1513/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1514/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1515/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1516/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1517/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1518/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1519/2000
279/279 [==============================] - ETA: 0s - loss: 0.0344 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1520/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1521/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1522/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1523/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1524/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1525/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1526/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1527/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1528/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1529/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1530/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1531/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1532/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1533/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1534/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1535/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1536/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1537/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1538/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1539/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1540/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1541/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1542/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1543/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1544/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1545/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1546/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1547/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1548/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1549/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1550/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1551/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1552/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1553/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1554/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1555/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1556/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1557/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1558/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1559/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1560/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1561/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1562/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1563/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1564/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1565/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1566/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1567/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1568/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1569/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1570/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1571/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1572/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1573/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1574/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1575/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1576/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1577/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1578/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1579/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1580/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1581/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1582/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1583/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1584/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1585/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1586/2000
279/279 [==============================] - ETA: 0s - loss: 0.0197 - accuracy: 1.00 - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1587/2000
279/279 [==============================] - 0s 147us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1588/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1589/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1590/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1591/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1592/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1593/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1594/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1595/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1596/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1597/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1598/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1599/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1600/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1601/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1602/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1603/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1604/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1605/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1606/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1607/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1608/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1609/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1610/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1611/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1612/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1613/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1614/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1615/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1616/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1617/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1618/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1619/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1620/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1621/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1622/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1623/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1624/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1625/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1626/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1627/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1628/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1629/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1630/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1631/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1632/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1633/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1634/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1635/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1636/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1637/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1638/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1639/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1640/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1641/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1642/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1643/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1644/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1645/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1646/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1647/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1648/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1649/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1650/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1651/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1652/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1653/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1654/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1655/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1656/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1657/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1658/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1659/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1660/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1661/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1662/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1663/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1664/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1665/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1666/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1667/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1668/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1669/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1670/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1671/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1672/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1673/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1674/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1675/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1676/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1677/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1678/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1679/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1680/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1681/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1682/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1683/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1684/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1685/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1686/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1687/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1688/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1689/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1690/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1691/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1692/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1693/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1694/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1695/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1696/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1697/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1698/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1699/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1700/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1701/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1702/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1703/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1704/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1705/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1706/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1707/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1708/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1709/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1710/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1711/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1712/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1713/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1714/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1715/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1716/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1717/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1718/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1719/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1720/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1721/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1722/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1723/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1724/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1725/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1726/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1727/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1728/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1729/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1730/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1731/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1732/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1733/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1734/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1735/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1736/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1737/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1738/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1739/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1740/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1741/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1742/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1743/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1744/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1745/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1746/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1747/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1748/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1749/2000
279/279 [==============================] - ETA: 0s - loss: 0.0269 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1750/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1751/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1752/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1753/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1754/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1755/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1756/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1757/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1758/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1759/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1760/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1761/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1762/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1763/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1764/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1765/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1766/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1767/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1768/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1769/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1770/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1771/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1772/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1773/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1774/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1775/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1776/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1777/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1778/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1779/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1780/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1781/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1782/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1783/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1784/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1785/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1786/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1787/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1788/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1789/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1790/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1791/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1792/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1793/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1794/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1795/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1796/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1797/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1798/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1799/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1800/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1801/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1802/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1803/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1804/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1805/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1806/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1807/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1808/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1809/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1810/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1811/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1812/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1813/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1814/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1815/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1816/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1817/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1818/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1819/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1820/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1821/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1822/2000
279/279 [==============================] - 0s 154us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1823/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1824/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1825/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1826/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1827/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1828/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1829/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1830/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1831/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1832/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1833/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1834/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1835/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1836/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1837/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1838/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1839/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1840/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1841/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1842/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1843/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1844/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1845/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1846/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1847/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1848/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1849/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1850/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1851/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1852/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1853/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1854/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1855/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1856/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1857/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1858/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1859/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1860/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1861/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1862/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1863/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1864/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1865/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1866/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1867/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1868/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1869/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1870/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1871/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1872/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1873/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1874/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1875/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1876/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1877/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1878/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1879/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1880/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1881/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1882/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1883/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1884/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1885/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1886/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1887/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1888/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1889/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1890/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1891/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1892/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1893/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1894/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1895/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1896/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1897/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1898/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1899/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1900/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1901/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1902/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1903/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1904/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1905/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1906/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1907/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1908/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1909/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1910/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1911/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1912/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1913/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1914/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1915/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1916/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1917/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1918/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1919/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1920/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1921/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1922/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1923/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1924/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1925/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1926/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1927/2000
279/279 [==============================] - ETA: 0s - loss: 0.0263 - accuracy: 1.00 - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1928/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1929/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1930/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1931/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1932/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1933/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1934/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1935/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1936/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1937/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1938/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1939/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1940/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1941/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1942/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1943/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1944/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1945/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1946/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1947/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1948/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1949/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1950/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1951/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1952/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1953/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1954/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1955/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1956/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1957/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1958/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1959/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1960/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1961/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1962/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1963/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1964/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1965/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1966/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1967/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1968/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1969/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1970/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1971/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1972/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1973/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1974/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1975/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1976/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1977/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1978/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1979/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1980/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1981/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1982/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1983/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1984/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1985/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1986/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1987/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1988/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1989/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1990/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1991/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1992/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1993/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1994/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1995/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1996/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1997/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1998/2000
279/279 [==============================] - 0s 88us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1999/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 2000/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
In [213]:
# Learning curves for the final Keras model (training vs. validation).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

# Accuracy curves: dots = training, solid line = validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss curves on a separate figure.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [214]:
# Evaluate on the held-out split. NOTE(review): this is the same split that was
# passed as validation_data during fit (val_loss 0.9480 matches), so this is
# not an independent test score — confirm intended.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
93/93 [==============================] - 0s 54us/step
test loss: 0.9480327226782358, test accuracy: 0.7634408473968506
In [215]:
# Hard class labels from the sigmoid output: threshold at 0.5.
y_pred = model.predict(X_test)
# Vectorized thresholding replaces the per-element
# `list(map(lambda i: int(i >= 0.5), y_pred))`; `ravel` flattens the
# (n, 1) prediction array to shape (n,) for the metric functions.
y_pred = (y_pred >= 0.5).astype(int).ravel()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
Kappa:  0.28059071729957796
AUC ROC:  0.6239130434782609

KMeans

In [216]:
# Inspect the clustering feature matrix: 372 tracks x 6 standardized
# tonal-centroid features (see the rendered table below).
X
Out[216]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.574572 0.885785 -0.744040 1.222732 -1.020937 0.253925
1 1.110821 0.925768 0.226451 -0.119687 0.143401 -0.009902
2 -0.106107 1.808295 1.961049 1.107464 2.076449 1.966210
3 0.083078 1.385239 1.506771 1.294360 1.104665 2.105667
4 -0.164331 0.169248 0.525026 1.442347 1.710639 0.657093
5 -0.601767 0.100025 2.894764 -1.234721 1.277722 2.254758
6 -0.436923 -0.300019 1.146480 0.730610 -1.634206 -0.621955
7 -0.587280 1.210009 0.829948 0.235398 -0.830262 0.277419
8 -0.072320 0.439239 -0.084262 0.666161 -0.979709 0.134482
9 0.177494 0.386052 0.266785 -1.461050 1.702079 -0.627335
10 0.473878 0.893926 -0.138418 -0.267275 -0.407548 -0.229186
11 0.245249 0.482974 0.995106 0.679754 0.235560 0.480101
12 0.011981 -0.373717 -0.589054 0.487517 -1.428960 0.073724
13 0.190041 -0.273603 0.483229 0.925167 -1.268062 -0.057357
14 -1.064192 -0.043564 -1.313412 -1.204309 1.571772 -1.751836
15 -0.197842 0.773898 0.917595 -0.533388 1.883323 -0.803595
16 -0.610344 0.615674 0.638901 -1.525221 1.272377 -1.132221
17 0.804488 -1.479012 0.374228 0.166272 -1.743433 -1.002346
18 0.490692 -1.982800 -0.516405 -0.202546 -1.666137 -1.170162
19 0.592053 -1.492906 0.420008 -0.901877 -2.327543 -0.070308
20 -1.787738 1.285484 -0.785859 -2.380832 -1.022434 -0.395605
21 -1.742165 -0.033766 -0.693835 -0.641834 0.381590 -1.954523
22 -1.729041 0.173705 -1.150118 -0.130491 -1.173120 -1.443805
23 0.437142 1.722799 -2.129021 -2.481456 -0.156650 0.254809
24 1.350380 0.970678 0.076009 -0.404025 -1.384857 0.117089
25 0.496482 -0.133100 -0.887460 0.472889 -1.490365 1.615562
26 0.364827 1.228853 -0.931602 -0.240277 -0.555015 1.259771
27 1.022426 1.569202 -1.345165 -1.077121 -0.192695 0.678057
28 0.458228 1.620487 -0.211045 -1.256812 0.846741 -0.038512
29 0.607951 1.683390 -0.591685 -0.243413 -0.937265 2.063508
... ... ... ... ... ... ...
342 0.736233 -1.887137 0.319724 -0.489954 -0.346230 -0.577137
343 2.234360 -0.046651 1.657368 -1.085388 0.721768 -1.665035
344 0.911113 0.039319 -1.297534 0.592375 -0.241987 2.261413
345 1.384636 -0.476054 -1.183101 0.367974 0.257066 1.378080
346 0.961181 0.071015 -0.810140 0.868325 0.332780 1.588246
347 0.937051 -1.450311 -0.319702 -0.228450 0.739750 -0.449282
348 -3.281761 0.179415 -1.446642 -0.685115 -0.600794 0.658153
349 -2.344820 0.180118 0.106773 0.037180 -0.877178 1.123398
350 1.460135 0.906456 -0.147713 0.343900 -0.620485 0.700956
351 2.114961 0.965145 -1.378351 -0.574489 -1.693320 -0.016307
352 1.504768 0.846661 -1.860231 0.705179 -1.292241 0.593433
353 -0.213580 0.437840 0.427356 1.095762 -1.001309 -0.054969
354 -0.063245 0.024794 0.060996 0.129995 -0.599389 0.500376
355 0.116032 0.004823 0.235041 0.085659 -0.431386 0.814703
356 -0.330558 0.187426 -0.234587 0.942953 -0.434096 0.203910
357 0.380368 0.608654 -0.308912 1.154966 -0.255135 0.167498
358 0.893056 0.389669 -0.426997 0.638788 -0.700280 0.368667
359 -1.019517 -2.497618 0.166376 1.273368 0.313702 -0.420230
360 -0.784247 -1.148191 -0.802374 1.038236 -0.602589 -0.913446
361 0.139494 -2.036594 -1.137199 -0.379348 -0.257913 0.506162
362 1.494839 -2.022204 -0.164524 -2.180060 0.002713 1.550614
363 3.128156 -1.231830 0.035160 -2.785380 -0.610055 0.724620
364 2.010326 -1.482568 -1.697983 -2.728569 -0.765820 2.873139
365 1.499390 0.641291 -0.739018 -1.456660 -0.760400 -0.452027
366 1.398136 1.715250 -0.369182 -1.280480 -0.150680 -0.884280
367 1.103318 0.778728 -0.851121 -1.368219 0.142626 -0.918794
368 0.303168 0.188358 0.095953 -0.024506 -0.709672 -1.109607
369 1.183673 0.747660 -0.209307 0.329011 -1.151082 -0.726250
370 -0.723654 -0.290377 1.173636 -0.123624 1.997744 -0.687810
371 -0.073840 1.011128 0.445136 0.821330 -0.338478 -0.694080

372 rows × 6 columns

In [217]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
# `WSSs` is plotted in the next cell to pick the number of clusters.
WSSs = []
for k in range(1, 15):
    # fit() returns the estimator, so we can read inertia_ in one expression
    WSSs.append(KMeans(n_clusters=k, random_state=0).fit(X).inertia_)
WSSs
Out[217]:
[2232.0,
 1860.320016741722,
 1609.923817414493,
 1413.3293251834796,
 1299.2594621943642,
 1187.9595776711437,
 1107.911820622218,
 1042.5800837127117,
 995.2457858732439,
 962.2198115266042,
 937.080734625301,
 898.72618782214,
 866.9993526105154,
 833.7799456003584]
In [218]:
# Elbow plot: look for the "knee" where inertia gains flatten (k = 4 below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs, marker='o')
plt.title('K-Means elbow curve')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[218]:
[<matplotlib.lines.Line2D at 0x1e82f9e3828>]

K=4

In [219]:
# Final clustering with k=4, chosen from the elbow plot above; fixed
# random_state keeps the cluster assignment reproducible.
kmeans_tc = KMeans(n_clusters=4, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[219]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [220]:
# Cluster id (0-3) assigned to each track during fit.
kmeans_tc.labels_
Out[220]:
array([2, 2, 0, 0, 0, 0, 1, 0, 0, 3, 2, 0, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3,
       1, 3, 2, 2, 2, 2, 3, 2, 0, 0, 3, 3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 3,
       3, 3, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 2, 2, 2, 3, 1, 3, 1, 3, 3,
       0, 0, 0, 3, 3, 3, 3, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 3, 3, 0, 0,
       2, 1, 0, 3, 2, 1, 1, 2, 0, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0,
       3, 0, 0, 0, 3, 0, 2, 2, 2, 0, 3, 2, 0, 3, 2, 3, 1, 1, 1, 0, 2, 0,
       3, 3, 3, 3, 1, 2, 0, 1, 0, 1, 1, 2, 1, 0, 1, 1, 0, 0, 0, 3, 0, 2,
       0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1, 1, 2, 2, 1, 3, 2, 3, 0, 1, 1, 2,
       0, 1, 1, 1, 1, 2, 1, 2, 2, 3, 3, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 1,
       2, 1, 1, 3, 0, 1, 2, 3, 2, 2, 3, 2, 0, 2, 2, 1, 0, 1, 0, 1, 2, 1,
       2, 3, 3, 3, 2, 2, 2, 1, 2, 1, 1, 2, 1, 2, 2, 3, 2, 3, 3, 3, 3, 3,
       3, 3, 3, 1, 0, 0, 2, 2, 2, 1, 2, 2, 3, 3, 3, 1, 2, 2, 3, 3, 3, 3,
       2, 1, 1, 1, 3, 3, 1, 1, 1, 2, 2, 1, 3, 2, 0, 2, 2, 3, 3, 3, 3, 1,
       0, 2, 2, 3, 2, 0, 3, 1, 3, 2, 1, 0, 0, 3, 0, 0, 0, 2, 0, 1, 1, 0,
       0, 0, 3, 0, 0, 1, 1, 3, 1, 1, 3, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0,
       0, 3, 0, 0, 0, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2,
       2, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 2, 2, 3, 3, 1, 2, 3, 0])
In [221]:
# Predicting on the same X used for fitting, so this equals
# kmeans_tc.labels_ shown above (kept for the column assignment below).
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[221]:
array([2, 2, 0, 0, 0, 0, 1, 0, 0, 3, 2, 0, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3,
       1, 3, 2, 2, 2, 2, 3, 2, 0, 0, 3, 3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 3,
       3, 3, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 2, 2, 2, 3, 1, 3, 1, 3, 3,
       0, 0, 0, 3, 3, 3, 3, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 3, 3, 0, 0,
       2, 1, 0, 3, 2, 1, 1, 2, 0, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0,
       3, 0, 0, 0, 3, 0, 2, 2, 2, 0, 3, 2, 0, 3, 2, 3, 1, 1, 1, 0, 2, 0,
       3, 3, 3, 3, 1, 2, 0, 1, 0, 1, 1, 2, 1, 0, 1, 1, 0, 0, 0, 3, 0, 2,
       0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1, 1, 2, 2, 1, 3, 2, 3, 0, 1, 1, 2,
       0, 1, 1, 1, 1, 2, 1, 2, 2, 3, 3, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 1,
       2, 1, 1, 3, 0, 1, 2, 3, 2, 2, 3, 2, 0, 2, 2, 1, 0, 1, 0, 1, 2, 1,
       2, 3, 3, 3, 2, 2, 2, 1, 2, 1, 1, 2, 1, 2, 2, 3, 2, 3, 3, 3, 3, 3,
       3, 3, 3, 1, 0, 0, 2, 2, 2, 1, 2, 2, 3, 3, 3, 1, 2, 2, 3, 3, 3, 3,
       2, 1, 1, 1, 3, 3, 1, 1, 1, 2, 2, 1, 3, 2, 0, 2, 2, 3, 3, 3, 3, 1,
       0, 2, 2, 3, 2, 0, 3, 1, 3, 2, 1, 0, 0, 3, 0, 0, 0, 2, 0, 1, 1, 0,
       0, 0, 3, 0, 0, 1, 1, 3, 1, 1, 3, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0,
       0, 3, 0, 0, 0, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2,
       2, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 2, 2, 3, 3, 1, 2, 3, 0])
In [222]:
# NOTE(review): this mutates the feature matrix X in place — any later cell
# expecting only the 6 tonal-centroid columns will now see 8 columns.
X.loc[:,'Cluster'] = clusters_tc
# list(y) discards y's index, so the assignment is positional, not index-aligned.
X.loc[:,'chosen'] = list(y)
In [223]:
# X after appending the Cluster and chosen columns (now 8 columns).
X
Out[223]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.574572 0.885785 -0.744040 1.222732 -1.020937 0.253925 2 0
1 1.110821 0.925768 0.226451 -0.119687 0.143401 -0.009902 2 0
2 -0.106107 1.808295 1.961049 1.107464 2.076449 1.966210 0 0
3 0.083078 1.385239 1.506771 1.294360 1.104665 2.105667 0 0
4 -0.164331 0.169248 0.525026 1.442347 1.710639 0.657093 0 0
5 -0.601767 0.100025 2.894764 -1.234721 1.277722 2.254758 0 0
6 -0.436923 -0.300019 1.146480 0.730610 -1.634206 -0.621955 1 0
7 -0.587280 1.210009 0.829948 0.235398 -0.830262 0.277419 0 0
8 -0.072320 0.439239 -0.084262 0.666161 -0.979709 0.134482 0 0
9 0.177494 0.386052 0.266785 -1.461050 1.702079 -0.627335 3 0
10 0.473878 0.893926 -0.138418 -0.267275 -0.407548 -0.229186 2 0
11 0.245249 0.482974 0.995106 0.679754 0.235560 0.480101 0 0
12 0.011981 -0.373717 -0.589054 0.487517 -1.428960 0.073724 1 0
13 0.190041 -0.273603 0.483229 0.925167 -1.268062 -0.057357 1 0
14 -1.064192 -0.043564 -1.313412 -1.204309 1.571772 -1.751836 3 0
15 -0.197842 0.773898 0.917595 -0.533388 1.883323 -0.803595 3 0
16 -0.610344 0.615674 0.638901 -1.525221 1.272377 -1.132221 3 0
17 0.804488 -1.479012 0.374228 0.166272 -1.743433 -1.002346 1 0
18 0.490692 -1.982800 -0.516405 -0.202546 -1.666137 -1.170162 1 0
19 0.592053 -1.492906 0.420008 -0.901877 -2.327543 -0.070308 1 0
20 -1.787738 1.285484 -0.785859 -2.380832 -1.022434 -0.395605 3 0
21 -1.742165 -0.033766 -0.693835 -0.641834 0.381590 -1.954523 3 0
22 -1.729041 0.173705 -1.150118 -0.130491 -1.173120 -1.443805 1 0
23 0.437142 1.722799 -2.129021 -2.481456 -0.156650 0.254809 3 0
24 1.350380 0.970678 0.076009 -0.404025 -1.384857 0.117089 2 0
25 0.496482 -0.133100 -0.887460 0.472889 -1.490365 1.615562 2 0
26 0.364827 1.228853 -0.931602 -0.240277 -0.555015 1.259771 2 0
27 1.022426 1.569202 -1.345165 -1.077121 -0.192695 0.678057 2 0
28 0.458228 1.620487 -0.211045 -1.256812 0.846741 -0.038512 3 0
29 0.607951 1.683390 -0.591685 -0.243413 -0.937265 2.063508 2 0
... ... ... ... ... ... ... ... ...
342 0.736233 -1.887137 0.319724 -0.489954 -0.346230 -0.577137 1 1
343 2.234360 -0.046651 1.657368 -1.085388 0.721768 -1.665035 3 1
344 0.911113 0.039319 -1.297534 0.592375 -0.241987 2.261413 2 1
345 1.384636 -0.476054 -1.183101 0.367974 0.257066 1.378080 2 1
346 0.961181 0.071015 -0.810140 0.868325 0.332780 1.588246 2 1
347 0.937051 -1.450311 -0.319702 -0.228450 0.739750 -0.449282 1 1
348 -3.281761 0.179415 -1.446642 -0.685115 -0.600794 0.658153 0 1
349 -2.344820 0.180118 0.106773 0.037180 -0.877178 1.123398 0 1
350 1.460135 0.906456 -0.147713 0.343900 -0.620485 0.700956 2 1
351 2.114961 0.965145 -1.378351 -0.574489 -1.693320 -0.016307 2 1
352 1.504768 0.846661 -1.860231 0.705179 -1.292241 0.593433 2 1
353 -0.213580 0.437840 0.427356 1.095762 -1.001309 -0.054969 0 1
354 -0.063245 0.024794 0.060996 0.129995 -0.599389 0.500376 0 1
355 0.116032 0.004823 0.235041 0.085659 -0.431386 0.814703 0 1
356 -0.330558 0.187426 -0.234587 0.942953 -0.434096 0.203910 0 1
357 0.380368 0.608654 -0.308912 1.154966 -0.255135 0.167498 2 1
358 0.893056 0.389669 -0.426997 0.638788 -0.700280 0.368667 2 1
359 -1.019517 -2.497618 0.166376 1.273368 0.313702 -0.420230 1 1
360 -0.784247 -1.148191 -0.802374 1.038236 -0.602589 -0.913446 1 1
361 0.139494 -2.036594 -1.137199 -0.379348 -0.257913 0.506162 1 1
362 1.494839 -2.022204 -0.164524 -2.180060 0.002713 1.550614 2 1
363 3.128156 -1.231830 0.035160 -2.785380 -0.610055 0.724620 2 1
364 2.010326 -1.482568 -1.697983 -2.728569 -0.765820 2.873139 2 1
365 1.499390 0.641291 -0.739018 -1.456660 -0.760400 -0.452027 2 1
366 1.398136 1.715250 -0.369182 -1.280480 -0.150680 -0.884280 3 1
367 1.103318 0.778728 -0.851121 -1.368219 0.142626 -0.918794 3 1
368 0.303168 0.188358 0.095953 -0.024506 -0.709672 -1.109607 1 1
369 1.183673 0.747660 -0.209307 0.329011 -1.151082 -0.726250 2 1
370 -0.723654 -0.290377 1.173636 -0.123624 1.997744 -0.687810 3 1
371 -0.073840 1.011128 0.445136 0.821330 -0.338478 -0.694080 0 1

372 rows × 8 columns

In [224]:
# Count tracks per (chosen, cluster) pair, pivot to clusters-by-choice, and
# draw a stacked bar chart of how chosen (1) vs. not-chosen (0) tracks
# distribute across the 4 clusters.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[224]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82fa1f9e8>
In [206]:
# Render the next company's name as a markdown section header.
# NOTE(review): import mid-notebook — better placed in the top import cell.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[1]))

Club De Banqueros y Empresarios

ANN

In [225]:
# Presumably the standardized tonal-centroid features for company index 1 —
# verify against where df_n_ps_std_tc is built.
X = df_n_ps_std_tc[1]
In [226]:
# Binary target: whether the track was chosen, for the same company slice.
y = df_n_ps[1]['chosen']
In [227]:
# Hold out a test split. random_state added so the split — and every score
# derived from it — is reproducible on Restart & Run All (the original call
# had no seed, giving a different split on every run).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [228]:
# Sanity check on the training split size.
X_train.shape
Out[228]:
(191, 6)
In [229]:
# Base estimator for the grid search; hidden_layer_sizes here is just a
# placeholder — the grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [230]:
# Candidate values for the MLP hyperparameter search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but commented out of the search grid in the next cell.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [231]:
import time
start = time.time() # Current time in seconds since 1 Jan 1970 (epoch); used to time the search

np.random.seed(1234)  # seed NumPy so the stochastic MLP training is repeatable
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
# this cell only runs on older sklearn versions.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [232]:
# Run the exhaustive grid search (slow: ~27 min per the output below) and
# report the best hyperparameters with their CV accuracy and kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.004, 'max_iter': 400}, que permiten obtener un Accuracy de 77.49% y un Kappa del 29.13
Tiempo total: 27.26 minutos
In [233]:
# Unpack the winning grid-search configuration into plain values for the
# Keras re-implementation below.
n0 = X_train.shape[1]  # input dimension (number of feature columns)

### hidden_layer_sizes
# Hidden layer widths from the best params (list(...) replaces the original
# element-by-element copy loop), plus the single output unit appended last.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [234]:
# Keras functional-API input layer sized to the n0 feature columns.
input_tensor = Input(shape = (n0,))
In [235]:
# Chain one tanh Dense layer per hidden width in ns[:-1]; each new layer
# consumes the most recent tensor in the chain.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation = 'tanh')(hidden_outputs[-1]))

# Final unit (ns[-1] == 1) with sigmoid for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [236]:
# Assemble the functional model and snapshot its freshly initialized weights;
# they are restored via set_weights() before training so the fit starts from
# a known state.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [237]:
# Layer/parameter summary of the assembled network (6 -> 10 -> 1).
model.summary()
Model: "model_9"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_9 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_28 (Dense)             (None, 10)                70        
_________________________________________________________________
dense_29 (Dense)             (None, 1)                 11        
=================================================================
Total params: 81
Trainable params: 81
Non-trainable params: 0
_________________________________________________________________
In [238]:
# Restore the initial weights, compile with Adam at the grid-searched
# learning rate, and train; ReduceLROnPlateau halves the LR when validation
# accuracy fails to improve by min_delta for 10 epochs.
# NOTE(review): the test split doubles as validation data here, so the later
# "test" evaluation is not on independent data — confirm intended.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/400
191/191 [==============================] - 0s 637us/step - loss: 0.7698 - accuracy: 0.5236 - val_loss: 0.7665 - val_accuracy: 0.4844
Epoch 2/400
191/191 [==============================] - 0s 68us/step - loss: 0.7283 - accuracy: 0.5497 - val_loss: 0.7359 - val_accuracy: 0.4844
Epoch 3/400
191/191 [==============================] - 0s 73us/step - loss: 0.6917 - accuracy: 0.5602 - val_loss: 0.7134 - val_accuracy: 0.4844
Epoch 4/400
191/191 [==============================] - 0s 73us/step - loss: 0.6639 - accuracy: 0.5969 - val_loss: 0.6940 - val_accuracy: 0.5156
Epoch 5/400
191/191 [==============================] - 0s 68us/step - loss: 0.6388 - accuracy: 0.6387 - val_loss: 0.6769 - val_accuracy: 0.5312
Epoch 6/400
191/191 [==============================] - 0s 73us/step - loss: 0.6195 - accuracy: 0.6492 - val_loss: 0.6648 - val_accuracy: 0.5625
Epoch 7/400
191/191 [==============================] - 0s 68us/step - loss: 0.6030 - accuracy: 0.6859 - val_loss: 0.6552 - val_accuracy: 0.5938
Epoch 8/400
191/191 [==============================] - 0s 63us/step - loss: 0.5890 - accuracy: 0.7173 - val_loss: 0.6494 - val_accuracy: 0.5781
Epoch 9/400
191/191 [==============================] - 0s 63us/step - loss: 0.5794 - accuracy: 0.7277 - val_loss: 0.6450 - val_accuracy: 0.6250
Epoch 10/400
191/191 [==============================] - 0s 68us/step - loss: 0.5711 - accuracy: 0.7330 - val_loss: 0.6423 - val_accuracy: 0.6250
Epoch 11/400
191/191 [==============================] - 0s 68us/step - loss: 0.5650 - accuracy: 0.7330 - val_loss: 0.6408 - val_accuracy: 0.6406
Epoch 12/400
191/191 [==============================] - 0s 68us/step - loss: 0.5593 - accuracy: 0.7330 - val_loss: 0.6408 - val_accuracy: 0.6406
Epoch 13/400
191/191 [==============================] - 0s 68us/step - loss: 0.5559 - accuracy: 0.7382 - val_loss: 0.6398 - val_accuracy: 0.6406
Epoch 14/400
191/191 [==============================] - 0s 63us/step - loss: 0.5528 - accuracy: 0.7382 - val_loss: 0.6403 - val_accuracy: 0.6406
Epoch 15/400
191/191 [==============================] - 0s 63us/step - loss: 0.5501 - accuracy: 0.7435 - val_loss: 0.6411 - val_accuracy: 0.6406
Epoch 16/400
191/191 [==============================] - 0s 63us/step - loss: 0.5477 - accuracy: 0.7382 - val_loss: 0.6409 - val_accuracy: 0.6406
Epoch 17/400
191/191 [==============================] - 0s 63us/step - loss: 0.5464 - accuracy: 0.7382 - val_loss: 0.6396 - val_accuracy: 0.6406
Epoch 18/400
191/191 [==============================] - 0s 68us/step - loss: 0.5444 - accuracy: 0.7435 - val_loss: 0.6407 - val_accuracy: 0.6406
Epoch 19/400
191/191 [==============================] - 0s 89us/step - loss: 0.5432 - accuracy: 0.7487 - val_loss: 0.6406 - val_accuracy: 0.6406
Epoch 20/400
191/191 [==============================] - 0s 84us/step - loss: 0.5419 - accuracy: 0.7539 - val_loss: 0.6414 - val_accuracy: 0.6406
Epoch 21/400
191/191 [==============================] - 0s 68us/step - loss: 0.5412 - accuracy: 0.7539 - val_loss: 0.6434 - val_accuracy: 0.6406

Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 22/400
191/191 [==============================] - 0s 68us/step - loss: 0.5401 - accuracy: 0.7539 - val_loss: 0.6438 - val_accuracy: 0.6406
Epoch 23/400
191/191 [==============================] - 0s 63us/step - loss: 0.5393 - accuracy: 0.7592 - val_loss: 0.6438 - val_accuracy: 0.6406
Epoch 24/400
191/191 [==============================] - 0s 73us/step - loss: 0.5390 - accuracy: 0.7592 - val_loss: 0.6436 - val_accuracy: 0.6406
Epoch 25/400
191/191 [==============================] - 0s 68us/step - loss: 0.5384 - accuracy: 0.7592 - val_loss: 0.6437 - val_accuracy: 0.6406
Epoch 26/400
191/191 [==============================] - 0s 68us/step - loss: 0.5379 - accuracy: 0.7592 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 27/400
191/191 [==============================] - 0s 79us/step - loss: 0.5375 - accuracy: 0.7539 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 28/400
191/191 [==============================] - 0s 68us/step - loss: 0.5370 - accuracy: 0.7487 - val_loss: 0.6441 - val_accuracy: 0.6406
Epoch 29/400
191/191 [==============================] - 0s 68us/step - loss: 0.5367 - accuracy: 0.7539 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 30/400
191/191 [==============================] - 0s 73us/step - loss: 0.5362 - accuracy: 0.7539 - val_loss: 0.6439 - val_accuracy: 0.6406
Epoch 31/400
191/191 [==============================] - 0s 78us/step - loss: 0.5357 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6406

Epoch 00031: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 32/400
191/191 [==============================] - 0s 78us/step - loss: 0.5352 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6406
Epoch 33/400
191/191 [==============================] - 0s 73us/step - loss: 0.5350 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 34/400
191/191 [==============================] - 0s 68us/step - loss: 0.5349 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6562
Epoch 35/400
191/191 [==============================] - 0s 84us/step - loss: 0.5346 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6406
Epoch 36/400
191/191 [==============================] - 0s 89us/step - loss: 0.5344 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6406
Epoch 37/400
191/191 [==============================] - 0s 84us/step - loss: 0.5342 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 38/400
191/191 [==============================] - 0s 73us/step - loss: 0.5339 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 39/400
191/191 [==============================] - 0s 73us/step - loss: 0.5337 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 40/400
191/191 [==============================] - 0s 99us/step - loss: 0.5335 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 41/400
191/191 [==============================] - 0s 89us/step - loss: 0.5333 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 42/400
191/191 [==============================] - 0s 78us/step - loss: 0.5330 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 43/400
191/191 [==============================] - 0s 73us/step - loss: 0.5328 - accuracy: 0.7539 - val_loss: 0.6448 - val_accuracy: 0.6406
Epoch 44/400
191/191 [==============================] - 0s 73us/step - loss: 0.5325 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 45/400
191/191 [==============================] - 0s 68us/step - loss: 0.5323 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 46/400
191/191 [==============================] - 0s 63us/step - loss: 0.5322 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 47/400
191/191 [==============================] - 0s 78us/step - loss: 0.5321 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 48/400
191/191 [==============================] - 0s 73us/step - loss: 0.5319 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 49/400
191/191 [==============================] - 0s 73us/step - loss: 0.5318 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 50/400
191/191 [==============================] - 0s 68us/step - loss: 0.5317 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 51/400
191/191 [==============================] - 0s 68us/step - loss: 0.5316 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 52/400
191/191 [==============================] - 0s 68us/step - loss: 0.5315 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 53/400
191/191 [==============================] - 0s 58us/step - loss: 0.5313 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 54/400
191/191 [==============================] - 0s 63us/step - loss: 0.5312 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00054: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 55/400
191/191 [==============================] - 0s 73us/step - loss: 0.5311 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 56/400
191/191 [==============================] - 0s 115us/step - loss: 0.5311 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 57/400
191/191 [==============================] - ETA: 0s - loss: 0.5277 - accuracy: 0.78 - 0s 84us/step - loss: 0.5310 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 58/400
191/191 [==============================] - 0s 73us/step - loss: 0.5310 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 59/400
191/191 [==============================] - 0s 73us/step - loss: 0.5309 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 60/400
191/191 [==============================] - 0s 73us/step - loss: 0.5308 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 61/400
191/191 [==============================] - 0s 73us/step - loss: 0.5307 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 62/400
191/191 [==============================] - 0s 73us/step - loss: 0.5307 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 63/400
191/191 [==============================] - 0s 73us/step - loss: 0.5306 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 64/400
191/191 [==============================] - 0s 84us/step - loss: 0.5306 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562

Epoch 00064: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 65/400
191/191 [==============================] - 0s 63us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 66/400
191/191 [==============================] - 0s 58us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 67/400
191/191 [==============================] - 0s 68us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 68/400
191/191 [==============================] - 0s 73us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 69/400
191/191 [==============================] - 0s 68us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 70/400
191/191 [==============================] - 0s 94us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 71/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 72/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 73/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 74/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00074: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 75/400
191/191 [==============================] - 0s 94us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 76/400
191/191 [==============================] - 0s 99us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 77/400
191/191 [==============================] - 0s 78us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 78/400
191/191 [==============================] - 0s 68us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 79/400
191/191 [==============================] - 0s 78us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 80/400
191/191 [==============================] - 0s 68us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 81/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 82/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 83/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 84/400
191/191 [==============================] - 0s 52us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00084: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 85/400
191/191 [==============================] - 0s 63us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 86/400
191/191 [==============================] - 0s 68us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 87/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 88/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 89/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 90/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 91/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 92/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 93/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 94/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00094: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 95/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 96/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 97/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 98/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 99/400
191/191 [==============================] - 0s 89us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 100/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 101/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 102/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 103/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 104/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00104: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 105/400
191/191 [==============================] - 0s 73us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 106/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 107/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 108/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 109/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 110/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 111/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 112/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 113/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 114/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00114: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 115/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 116/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 117/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 118/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 119/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 120/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 121/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 122/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 123/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 124/400
191/191 [==============================] - 0s 105us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00124: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 125/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 126/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 127/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 128/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 129/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 130/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 131/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 132/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 133/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 134/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00134: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 135/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 136/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 137/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 138/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 139/400
191/191 [==============================] - 0s 94us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 140/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 141/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 142/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 143/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 144/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00144: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 145/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 146/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 147/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 148/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 149/400
191/191 [==============================] - 0s 105us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 150/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 151/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 152/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 153/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 154/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00154: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 155/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 156/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 157/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 158/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 159/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 160/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 161/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 162/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 163/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 164/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00164: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 165/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 166/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 167/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 168/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 169/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 170/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 171/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 172/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 173/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 174/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00174: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 175/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 176/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 177/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 178/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 179/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 180/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 181/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 182/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 183/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 184/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00184: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 185/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 186/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 187/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 188/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 189/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 190/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 191/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 192/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 193/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 194/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00194: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 195/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 196/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 197/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 198/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 199/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 200/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 201/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 202/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 203/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 204/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00204: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 205/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 206/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 207/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 208/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 209/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 210/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 211/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 212/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 213/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 214/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00214: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 215/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 216/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 217/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 218/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 219/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 220/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 221/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 222/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 223/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 224/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00224: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 225/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 226/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 227/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 228/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 229/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 230/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 231/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 232/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 233/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 234/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00234: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 235/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 236/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 237/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 238/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 239/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 240/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 241/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 242/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 243/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 244/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00244: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 245/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 246/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 247/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 248/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 249/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 250/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 251/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 252/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 253/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 254/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00254: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 255/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 256/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 257/400
191/191 [==============================] - 0s 99us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 258/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 259/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 260/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 261/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 262/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 263/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 264/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00264: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 265/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 266/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 267/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 268/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 269/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 270/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 271/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 272/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 273/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 274/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00274: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 275/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 276/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 277/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 278/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 279/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 280/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 281/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 282/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 283/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 284/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00284: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 285/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 286/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 287/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 288/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 289/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 290/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 291/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 292/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 293/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 294/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00294: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 295/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 296/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 297/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 298/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 299/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 300/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 301/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 302/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 303/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 304/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00304: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 305/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 306/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 307/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 308/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 309/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 310/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 311/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 312/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 313/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 314/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00314: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 315/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 316/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 317/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 318/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 319/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 320/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 321/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 322/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 323/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 324/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00324: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 325/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 326/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 327/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 328/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 329/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 330/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 331/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 332/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 333/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 334/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00334: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 335/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 336/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 337/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 338/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 339/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 340/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 341/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 342/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 343/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 344/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00344: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 345/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 346/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 347/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 348/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 349/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 350/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 351/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 352/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 353/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 354/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00354: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 355/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 356/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 357/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 358/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 359/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 360/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 361/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 362/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 363/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 364/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00364: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 365/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 366/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 367/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 368/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 369/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 370/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 371/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 372/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 373/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 374/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00374: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 375/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 376/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 377/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 378/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 379/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 380/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 381/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 382/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 383/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 384/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00384: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 385/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 386/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 387/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 388/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 389/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 390/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 391/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 392/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 393/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 394/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00394: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 395/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 396/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 397/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 398/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 399/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 400/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
In [239]:
# Training-history curves: accuracy and loss, train vs. validation.
# (Removed a leftover debug print(epochs) that only printed a bare
# range object; added axis labels so the figures stand alone.)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One point per recorded epoch (0-based x-axis).
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 400)
In [240]:
# Final held-out evaluation of the trained Keras model on the test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
64/64 [==============================] - 0s 62us/step
test loss: 0.644367516040802, test accuracy: 0.65625
In [241]:
# Ranking quality on the test set: AUC uses the raw sigmoid scores,
# so predict() output is passed in before any thresholding.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.49637681159420294
In [242]:
# Binarize the sigmoid scores at the 0.5 cut-off, then report
# Cohen's kappa against the held-out labels.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  -0.023255813953488413

KMeans

In [243]:
# Show the feature matrix used for clustering: 6 tonal-centroid
# columns (values appear standardized -- see the table below).
X
Out[243]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427
... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789

255 rows × 6 columns

In [244]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so fit(...).inertia_ chains.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[244]:
[1530.0000000000002,
 1266.8988304034983,
 1085.4171102625123,
 963.5827926636907,
 872.5239995069635,
 797.6140851961846,
 747.1323294070899,
 703.670300371115,
 664.3614627122823,
 637.5590430281768,
 607.7011770650902,
 585.4389967082509,
 558.8506960652073,
 540.5660329891642]
In [245]:
# Elbow plot: inertia vs. number of clusters, used to pick k visually.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[245]:
[<matplotlib.lines.Line2D at 0x1e82fedf908>]

K=3

In [248]:
# Final clustering on the tonal-centroid features with the
# elbow-selected k = 3 (fixed random_state for reproducibility).
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[248]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [249]:
# Cluster label assigned to each row of X during fit.
kmeans_tc.labels_
Out[249]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [250]:
# Cluster assignment for every track. Calling predict(X) right after
# fit(X) just recomputes the nearest-centroid assignment that sklearn
# already exposes as labels_ (Out[249] and Out[250] are identical),
# so reuse labels_ instead of a second pass over the data.
clusters_tc = kmeans_tc.labels_
clusters_tc
Out[250]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [251]:
# Attach the cluster id and the target flag to the feature frame.
# NOTE(review): list(y) drops y's index, so alignment with X is purely
# positional -- assumes X and y share the same row order; confirm.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [252]:
# Show the frame with the new 'Cluster' and 'chosen' columns appended.
X
Out[252]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657 2 0
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150 0 0
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556 2 0
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225 0 0
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064 2 0
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760 0 0
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006 0 0
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899 2 0
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231 2 0
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941 0 0
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603 0 0
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297 2 0
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418 1 0
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511 0 0
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578 0 0
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091 1 0
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601 0 0
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744 0 0
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005 0 0
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715 0 0
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699 0 0
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938 2 0
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766 0 0
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216 0 0
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372 1 0
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177 1 0
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656 0 0
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495 0 0
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476 0 0
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427 2 0
... ... ... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486 1 1
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195 0 1
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882 1 1
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947 1 1
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571 1 1
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245 1 1
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298 0 1
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640 2 1
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759 1 1
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686 1 1
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325 2 1
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907 2 1
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388 2 1
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946 1 1
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004 1 1
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039 1 1
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120 0 1
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067 0 1
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823 0 1
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778 0 1
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072 2 1
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304 0 1
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229 2 1
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173 2 1
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670 0 1
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641 1 1
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812 1 1
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218 1 1
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656 2 1
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789 1 1

255 rows × 8 columns

In [253]:
# Cluster composition: count tracks per (chosen, cluster) pair and
# draw a stacked bar chart -- one bar per cluster, split by 'chosen'.
# unstack('chosen') pivots the group sizes into a Cluster x chosen table,
# equivalent to the reset_index + pivot round-trip.
cluster_counts = X.groupby(['chosen','Cluster']).size().unstack('chosen')
cluster_counts[[0, 1]].plot.bar(stacked=True, figsize=(10,7))
Out[253]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e8304ba3c8>
In [75]:
# Render the current company's name as a markdown section header.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[2]))

Gramma

ANN

In [256]:
# Tonal-centroid features for the third company's playlists
# (the _std suffix suggests standardized values -- confirm upstream).
X = df_n_ps_std_tc[2]
In [257]:
# Binary target: whether the track was chosen for the playlist.
y = df_n_ps[2]['chosen']
In [258]:
# Hold-out split (sklearn default 75/25). No random_state is fixed, so
# the split changes on every re-run -- NOTE(review): seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [259]:
# Sanity check: training rows x feature columns (Out: (231, 6)).
X_train.shape
Out[259]:
(231, 6)
In [260]:
# Base estimator for the grid search; this architecture is only a
# placeholder -- GridSearchCV below tunes the real hyperparameters.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [261]:
# Candidate hyperparameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid below
In [262]:
import time
start = time.time() # Current time in seconds since Jan 1 1970 (reference point for timing the search)

np.random.seed(1234)
# Hyperparameter grid; batch_size is deliberately left out (commented).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; the best model is refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): 'iid' was deprecated in scikit-learn 0.22 and removed in
# 0.24 — confirm the pinned scikit-learn version before re-running.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [263]:
# Run the exhaustive grid search, then report the winning configuration,
# its CV accuracy, and the matching mean kappa. (Report text is a runtime
# string and is kept in Spanish as in the original.)
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after the model training finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.01, 'max_iter': 10}, que permiten obtener un Accuracy de 81.82% y un Kappa del 6.27
Tiempo total: 28.04 minutos
In [264]:
# Translate the winning grid-search hyperparameters into the values the
# Keras model below needs.
best = grid.best_params_

n0 = X_train.shape[1]  # input dimensionality
# Hidden layer widths from the best config, plus a single output unit.
ns = list(best['hidden_layer_sizes']) + [1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [265]:
# Keras functional-API input layer matching the feature count.
input_tensor = Input(shape = (n0,))
In [266]:
# Build the hidden stack layer by layer, each Dense layer consuming the
# previous layer's output.  The activation selected by the grid search is
# mapped to its Keras name instead of being hard-coded to 'tanh' — the
# original ignored the tuned activation (sklearn's 'logistic' is Keras'
# 'sigmoid'; 'relu' and 'tanh' share names).
_keras_act = {'logistic': 'sigmoid', 'relu': 'relu', 'tanh': 'tanh'}
hidden_act = _keras_act[grid.best_params_['activation']]

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=hidden_act)(hidden_outputs[i]))

# Single sigmoid unit for the binary 'chosen' classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [267]:
# Assemble the model and snapshot its freshly initialized weights so the
# same initialization can be restored before training (see set_weights below).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [268]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_10"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_10 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_30 (Dense)             (None, 10)                70        
_________________________________________________________________
dense_31 (Dense)             (None, 1)                 11        
=================================================================
Total params: 81
Trainable params: 81
Non-trainable params: 0
_________________________________________________________________
In [269]:
# Restore the initial weights, then train with Adam at the grid-selected
# learning rate for the grid-selected number of epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): 'lr' is deprecated in newer Keras — confirm version ('learning_rate' preferred)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when val_accuracy stops improving for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 231 samples, validate on 78 samples
Epoch 1/10
231/231 [==============================] - 0s 662us/step - loss: 0.6836 - accuracy: 0.5455 - val_loss: 0.6505 - val_accuracy: 0.6410
Epoch 2/10
231/231 [==============================] - 0s 48us/step - loss: 0.5944 - accuracy: 0.7706 - val_loss: 0.6123 - val_accuracy: 0.7436
Epoch 3/10
231/231 [==============================] - 0s 56us/step - loss: 0.5463 - accuracy: 0.7879 - val_loss: 0.6006 - val_accuracy: 0.7564
Epoch 4/10
231/231 [==============================] - 0s 56us/step - loss: 0.5110 - accuracy: 0.7922 - val_loss: 0.5894 - val_accuracy: 0.7692
Epoch 5/10
231/231 [==============================] - 0s 56us/step - loss: 0.4904 - accuracy: 0.8009 - val_loss: 0.5875 - val_accuracy: 0.7692
Epoch 6/10
231/231 [==============================] - 0s 52us/step - loss: 0.4813 - accuracy: 0.8095 - val_loss: 0.5922 - val_accuracy: 0.7564
Epoch 7/10
231/231 [==============================] - 0s 78us/step - loss: 0.4761 - accuracy: 0.8052 - val_loss: 0.5972 - val_accuracy: 0.7692
Epoch 8/10
231/231 [==============================] - 0s 56us/step - loss: 0.4736 - accuracy: 0.8009 - val_loss: 0.5994 - val_accuracy: 0.7564
Epoch 9/10
231/231 [==============================] - 0s 61us/step - loss: 0.4734 - accuracy: 0.8052 - val_loss: 0.6042 - val_accuracy: 0.7564
Epoch 10/10
231/231 [==============================] - 0s 52us/step - loss: 0.4710 - accuracy: 0.8095 - val_loss: 0.5983 - val_accuracy: 0.7564
In [270]:
# Plot training/validation accuracy and loss curves from the Keras history.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Dedicated name for the x-axis values: the original reused `epochs`,
# shadowing the hyperparameter taken from the grid search, and left a
# debug print of the range in the output — both removed here.
epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 10)
In [271]:
# Final held-out evaluation on the test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
78/78 [==============================] - 0s 77us/step
test loss: 0.5983365086408762, test accuracy: 0.7564102411270142
In [272]:
# ROC AUC computed on the raw sigmoid outputs (scores, not hard labels).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.487957181088314
In [273]:
# Binarize the sigmoid scores at 0.5, then score agreement with Cohen's kappa.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.0

KMeans

In [274]:
# Display the standardized tonal-centroid feature matrix used for clustering.
X
Out[274]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 0.609365 0.535135 0.096066 1.915075 0.555249 1.854023
1 -0.157519 -1.311578 -0.486252 0.435334 -0.648735 -1.184658
2 -1.148976 -1.325889 0.573178 -1.556913 0.331644 1.236576
3 -0.996141 -0.557658 0.998693 -0.200592 -0.357882 0.086757
4 -0.648977 -0.248473 0.461357 -1.268368 -1.468590 -0.111563
5 0.642800 -1.165140 -1.792767 -0.560937 -0.122789 1.152255
6 0.443136 -0.482419 -1.016712 0.352897 -0.127270 0.658601
7 -0.022060 -0.811084 -0.405764 0.350011 0.381710 -0.262666
8 0.961630 1.407288 1.551164 0.606159 -0.391772 0.029812
9 0.798279 1.109447 -0.137057 0.704421 -0.893816 1.290122
10 0.685212 1.076167 -0.288224 1.508940 0.286089 2.439563
11 0.803628 1.560996 0.774426 -0.123135 -0.047389 -0.441250
12 0.099890 1.445746 -1.453815 -0.214603 -1.139872 -0.252335
13 -0.212854 1.286504 0.720036 -0.872519 0.955706 -2.078957
14 -1.537501 -0.213678 -1.834954 -1.070992 1.539047 0.982703
15 -1.440013 0.872943 -0.809520 -0.147752 0.222384 -1.045942
16 -1.697309 0.606859 -1.747229 0.351080 1.440681 -1.505620
17 1.508685 0.904715 2.767009 0.452322 0.416081 -1.050028
18 0.675946 0.502807 1.115627 -1.151104 0.856946 -1.406132
19 1.299159 0.428687 1.764048 2.076242 0.575092 0.580747
20 0.769693 -0.740825 0.452717 -0.142755 0.170817 -0.358021
21 0.495856 -0.499875 -0.034482 0.350727 0.157251 0.626608
22 0.772290 -0.678641 0.547946 -0.428025 -1.433904 -0.355505
23 0.137654 1.496537 0.060579 -0.502941 0.411442 -0.122067
24 0.347129 0.809472 0.948516 -0.022364 -0.154971 -0.309935
25 0.029111 1.842864 0.123391 0.049825 0.617290 -0.085588
26 -1.473729 0.531152 0.801294 -1.410029 1.941482 0.485707
27 -0.484210 0.210847 1.009613 1.253693 -0.492018 -0.245022
28 -0.013618 -2.024383 -0.832429 0.712753 0.584767 0.136182
29 -1.746529 0.488857 -0.774537 0.083168 1.702499 -0.373082
... ... ... ... ... ... ...
279 -0.218279 -0.903841 -1.454712 1.847673 -0.519253 -0.262567
280 0.264235 -1.579208 -1.404331 -0.267295 1.169932 0.986186
281 -0.895563 0.461466 0.497480 0.976976 -0.096074 0.123883
282 -1.010758 0.423694 0.383226 0.839004 -0.068742 -0.327768
283 -1.300227 0.920815 0.844807 0.541693 0.080564 -0.199530
284 1.450169 0.335733 1.585783 0.110995 1.150821 -0.583010
285 1.913390 0.883060 1.899591 1.145796 0.539016 0.768216
286 1.444559 0.481538 1.742822 0.581765 1.145564 -0.912228
287 1.470621 -0.218928 -0.057911 -0.985989 -1.598797 -0.641710
288 1.040063 -0.416007 0.629628 -0.616364 -1.267930 -0.730043
289 1.328762 -0.491263 0.742595 -0.863279 -1.402041 -0.885662
290 0.397008 1.213991 -0.429044 0.832112 -0.843278 1.390832
291 0.172503 0.897500 -0.272973 0.210231 -0.458942 0.381469
292 0.661186 1.163101 -0.294020 -0.024527 -0.744258 0.845600
293 -0.773212 -0.895468 0.318110 1.184648 -0.212121 -0.353155
294 -0.370777 -1.414554 -1.094557 0.703279 1.214849 0.177043
295 0.138250 -1.936963 0.151220 0.398994 2.119363 1.534181
296 0.819776 -0.472427 -0.322556 0.733590 0.794066 0.850059
297 0.754438 -1.235411 0.322527 -0.225499 -1.850620 0.965915
298 1.007225 -0.846471 -0.431575 -0.067897 0.376757 0.841471
299 0.218592 -0.968903 -0.899045 -0.447626 -0.181640 -0.029332
300 0.664524 -0.896436 -1.249763 0.108770 0.071596 -0.691963
301 0.711838 -1.301487 -0.735548 -0.243133 0.655501 0.656529
302 -0.800747 0.870240 0.930120 1.819532 0.060854 0.468146
303 -1.334960 1.396005 2.690760 1.279657 0.956382 1.160282
304 -0.792172 0.906439 1.374725 1.647470 -0.613395 0.102004
305 -1.381202 0.689863 -0.347752 0.607044 -0.309056 -0.227433
306 -1.296109 0.634783 -0.484683 0.814045 -0.809678 0.515808
307 0.326809 -0.987801 -1.740993 0.307094 2.260097 0.394211
308 0.873341 2.041793 -1.371451 -2.438935 -0.567199 -1.300657

309 rows × 6 columns

In [275]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_
        for k in range(1, 15)]
WSSs
Out[275]:
[1854.0,
 1536.8736642821204,
 1324.7285075839484,
 1167.9156349888185,
 1059.6165149053068,
 950.057768838126,
 894.4360136638212,
 823.7421512103674,
 771.932101822877,
 716.3847905620515,
 681.7237006830833,
 653.1513609666642,
 638.9262100013641,
 609.9795957680683]
In [276]:
# Elbow plot: WSS versus number of clusters (k chosen as 4 below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[276]:
[<matplotlib.lines.Line2D at 0x1e831b5c9e8>]

K=4

In [277]:
# Final clustering with k=4, chosen from the elbow plot above.
kmeans_tc = KMeans(n_clusters=4, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[277]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [278]:
# Cluster label assigned to each sample during fitting.
kmeans_tc.labels_
Out[278]:
array([2, 1, 3, 1, 3, 2, 2, 2, 1, 2, 2, 3, 1, 0, 0, 0, 0, 3, 0, 1, 3, 2,
       3, 0, 1, 0, 0, 1, 2, 0, 2, 2, 2, 1, 0, 2, 2, 3, 1, 1, 1, 2, 2, 2,
       2, 2, 1, 3, 3, 3, 1, 1, 1, 0, 0, 0, 2, 2, 3, 2, 0, 2, 0, 2, 2, 2,
       3, 1, 1, 1, 1, 3, 3, 3, 0, 0, 2, 1, 1, 1, 2, 2, 2, 0, 1, 0, 3, 3,
       3, 2, 0, 1, 0, 2, 1, 1, 1, 1, 2, 2, 2, 3, 0, 1, 1, 1, 3, 2, 0, 2,
       0, 3, 3, 3, 2, 0, 0, 1, 0, 0, 3, 3, 0, 1, 1, 0, 3, 3, 2, 0, 2, 0,
       0, 0, 3, 2, 1, 0, 0, 0, 3, 3, 1, 2, 3, 3, 0, 1, 0, 0, 3, 0, 1, 3,
       2, 0, 3, 1, 3, 1, 3, 2, 2, 3, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1, 1, 3,
       3, 2, 2, 2, 2, 2, 2, 3, 1, 0, 0, 1, 2, 1, 1, 0, 0, 2, 3, 3, 1, 0,
       1, 0, 1, 2, 2, 2, 1, 0, 1, 3, 3, 3, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1,
       1, 2, 2, 3, 1, 3, 3, 0, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 0, 0, 0, 1,
       1, 3, 3, 1, 2, 3, 2, 3, 1, 3, 2, 0, 1, 2, 2, 2, 0, 1, 0, 3, 2, 3,
       0, 0, 2, 0, 0, 0, 3, 2, 2, 2, 1, 1, 2, 1, 0, 2, 2, 1, 1, 1, 3, 3,
       3, 3, 3, 3, 2, 1, 2, 1, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2,
       0])
In [279]:
# Predict cluster membership for the same data; on the fitted data this
# matches kmeans_tc.labels_ (the two outputs above/below are identical).
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[279]:
array([2, 1, 3, 1, 3, 2, 2, 2, 1, 2, 2, 3, 1, 0, 0, 0, 0, 3, 0, 1, 3, 2,
       3, 0, 1, 0, 0, 1, 2, 0, 2, 2, 2, 1, 0, 2, 2, 3, 1, 1, 1, 2, 2, 2,
       2, 2, 1, 3, 3, 3, 1, 1, 1, 0, 0, 0, 2, 2, 3, 2, 0, 2, 0, 2, 2, 2,
       3, 1, 1, 1, 1, 3, 3, 3, 0, 0, 2, 1, 1, 1, 2, 2, 2, 0, 1, 0, 3, 3,
       3, 2, 0, 1, 0, 2, 1, 1, 1, 1, 2, 2, 2, 3, 0, 1, 1, 1, 3, 2, 0, 2,
       0, 3, 3, 3, 2, 0, 0, 1, 0, 0, 3, 3, 0, 1, 1, 0, 3, 3, 2, 0, 2, 0,
       0, 0, 3, 2, 1, 0, 0, 0, 3, 3, 1, 2, 3, 3, 0, 1, 0, 0, 3, 0, 1, 3,
       2, 0, 3, 1, 3, 1, 3, 2, 2, 3, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1, 1, 3,
       3, 2, 2, 2, 2, 2, 2, 3, 1, 0, 0, 1, 2, 1, 1, 0, 0, 2, 3, 3, 1, 0,
       1, 0, 1, 2, 2, 2, 1, 0, 1, 3, 3, 3, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1,
       1, 2, 2, 3, 1, 3, 3, 0, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 0, 0, 0, 1,
       1, 3, 3, 1, 2, 3, 2, 3, 1, 3, 2, 0, 1, 2, 2, 2, 0, 1, 0, 3, 2, 3,
       0, 0, 2, 0, 0, 0, 3, 2, 2, 2, 1, 1, 2, 1, 0, 2, 2, 1, 1, 1, 3, 3,
       3, 3, 3, 3, 2, 1, 2, 1, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2,
       0])
In [280]:
# Attach the cluster assignment and the target label to the feature frame
# so they can be cross-tabulated below.
X['Cluster'] = clusters_tc
X['chosen'] = list(y)
In [281]:
# Inspect the frame with the new 'Cluster' and 'chosen' columns attached.
X
Out[281]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 0.609365 0.535135 0.096066 1.915075 0.555249 1.854023 2 0
1 -0.157519 -1.311578 -0.486252 0.435334 -0.648735 -1.184658 1 0
2 -1.148976 -1.325889 0.573178 -1.556913 0.331644 1.236576 3 0
3 -0.996141 -0.557658 0.998693 -0.200592 -0.357882 0.086757 1 0
4 -0.648977 -0.248473 0.461357 -1.268368 -1.468590 -0.111563 3 0
5 0.642800 -1.165140 -1.792767 -0.560937 -0.122789 1.152255 2 0
6 0.443136 -0.482419 -1.016712 0.352897 -0.127270 0.658601 2 0
7 -0.022060 -0.811084 -0.405764 0.350011 0.381710 -0.262666 2 0
8 0.961630 1.407288 1.551164 0.606159 -0.391772 0.029812 1 0
9 0.798279 1.109447 -0.137057 0.704421 -0.893816 1.290122 2 0
10 0.685212 1.076167 -0.288224 1.508940 0.286089 2.439563 2 0
11 0.803628 1.560996 0.774426 -0.123135 -0.047389 -0.441250 3 0
12 0.099890 1.445746 -1.453815 -0.214603 -1.139872 -0.252335 1 0
13 -0.212854 1.286504 0.720036 -0.872519 0.955706 -2.078957 0 0
14 -1.537501 -0.213678 -1.834954 -1.070992 1.539047 0.982703 0 0
15 -1.440013 0.872943 -0.809520 -0.147752 0.222384 -1.045942 0 0
16 -1.697309 0.606859 -1.747229 0.351080 1.440681 -1.505620 0 0
17 1.508685 0.904715 2.767009 0.452322 0.416081 -1.050028 3 0
18 0.675946 0.502807 1.115627 -1.151104 0.856946 -1.406132 0 0
19 1.299159 0.428687 1.764048 2.076242 0.575092 0.580747 1 0
20 0.769693 -0.740825 0.452717 -0.142755 0.170817 -0.358021 3 0
21 0.495856 -0.499875 -0.034482 0.350727 0.157251 0.626608 2 0
22 0.772290 -0.678641 0.547946 -0.428025 -1.433904 -0.355505 3 0
23 0.137654 1.496537 0.060579 -0.502941 0.411442 -0.122067 0 0
24 0.347129 0.809472 0.948516 -0.022364 -0.154971 -0.309935 1 0
25 0.029111 1.842864 0.123391 0.049825 0.617290 -0.085588 0 0
26 -1.473729 0.531152 0.801294 -1.410029 1.941482 0.485707 0 0
27 -0.484210 0.210847 1.009613 1.253693 -0.492018 -0.245022 1 0
28 -0.013618 -2.024383 -0.832429 0.712753 0.584767 0.136182 2 0
29 -1.746529 0.488857 -0.774537 0.083168 1.702499 -0.373082 0 0
... ... ... ... ... ... ... ... ...
279 -0.218279 -0.903841 -1.454712 1.847673 -0.519253 -0.262567 2 1
280 0.264235 -1.579208 -1.404331 -0.267295 1.169932 0.986186 2 1
281 -0.895563 0.461466 0.497480 0.976976 -0.096074 0.123883 1 1
282 -1.010758 0.423694 0.383226 0.839004 -0.068742 -0.327768 1 1
283 -1.300227 0.920815 0.844807 0.541693 0.080564 -0.199530 1 1
284 1.450169 0.335733 1.585783 0.110995 1.150821 -0.583010 3 1
285 1.913390 0.883060 1.899591 1.145796 0.539016 0.768216 3 1
286 1.444559 0.481538 1.742822 0.581765 1.145564 -0.912228 3 1
287 1.470621 -0.218928 -0.057911 -0.985989 -1.598797 -0.641710 3 1
288 1.040063 -0.416007 0.629628 -0.616364 -1.267930 -0.730043 3 1
289 1.328762 -0.491263 0.742595 -0.863279 -1.402041 -0.885662 3 1
290 0.397008 1.213991 -0.429044 0.832112 -0.843278 1.390832 2 1
291 0.172503 0.897500 -0.272973 0.210231 -0.458942 0.381469 1 1
292 0.661186 1.163101 -0.294020 -0.024527 -0.744258 0.845600 2 1
293 -0.773212 -0.895468 0.318110 1.184648 -0.212121 -0.353155 1 1
294 -0.370777 -1.414554 -1.094557 0.703279 1.214849 0.177043 2 1
295 0.138250 -1.936963 0.151220 0.398994 2.119363 1.534181 2 1
296 0.819776 -0.472427 -0.322556 0.733590 0.794066 0.850059 2 1
297 0.754438 -1.235411 0.322527 -0.225499 -1.850620 0.965915 3 1
298 1.007225 -0.846471 -0.431575 -0.067897 0.376757 0.841471 2 1
299 0.218592 -0.968903 -0.899045 -0.447626 -0.181640 -0.029332 2 1
300 0.664524 -0.896436 -1.249763 0.108770 0.071596 -0.691963 2 1
301 0.711838 -1.301487 -0.735548 -0.243133 0.655501 0.656529 2 1
302 -0.800747 0.870240 0.930120 1.819532 0.060854 0.468146 1 1
303 -1.334960 1.396005 2.690760 1.279657 0.956382 1.160282 1 1
304 -0.792172 0.906439 1.374725 1.647470 -0.613395 0.102004 1 1
305 -1.381202 0.689863 -0.347752 0.607044 -0.309056 -0.227433 1 1
306 -1.296109 0.634783 -0.484683 0.814045 -0.809678 0.515808 1 1
307 0.326809 -0.987801 -1.740993 0.307094 2.260097 0.394211 2 1
308 0.873341 2.041793 -1.371451 -2.438935 -0.567199 -1.300657 0 1

309 rows × 8 columns

In [282]:
# Count songs per (chosen, Cluster) pair and draw the counts as a stacked
# bar chart, one bar per cluster (columns 0/1 = not-chosen/chosen).
stacked = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
counts_by_choice = pivot_df.loc[:, [0, 1]]
counts_by_choice.plot.bar(stacked=True, figsize=(10, 7))
Out[282]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e831bdb940>
In [283]:
# Render the current company's name as a level-2 markdown heading.
from IPython.display import display, Markdown, Latex

section_header = Markdown('## ' + companies[3])
display(section_header)

Hotel Marrakech

ANN

In [284]:
# Standardized tonal-centroid feature matrix for company index 3.
X = df_n_ps_std_tc[3]
In [285]:
# Binary target column 'chosen' for the same company.
y = df_n_ps[3]['chosen']
In [286]:
# Hold-out split (sklearn default 75/25 — 139 train / 47 test below).
# NOTE(review): no random_state is set, so the split is not reproducible
# across runs — consider fixing a seed.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [287]:
# Sanity check: training-set size and feature count.
X_train.shape
Out[287]:
(139, 6)
In [288]:
# Base estimator for the grid search; this hidden_layer_sizes value is a
# placeholder — the grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [289]:
# Candidate hyperparameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the grid below (see the commented-out entry).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [290]:
import time
start = time.time() # Current time in seconds since Jan 1 1970 (reference point for timing the search)

np.random.seed(1234)
# Hyperparameter grid; batch_size is deliberately left out (commented).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; the best model is refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): 'iid' was deprecated in scikit-learn 0.22 and removed in
# 0.24 — confirm the pinned scikit-learn version before re-running.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [291]:
# Run the exhaustive grid search, then report the winning configuration,
# its CV accuracy, and the matching mean kappa. (Report text is a runtime
# string and is kept in Spanish as in the original.)
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after the model training finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.007, 'max_iter': 1000}, que permiten obtener un Accuracy de 80.58% y un Kappa del 50.08
Tiempo total: 22.27 minutos
In [292]:
# Translate the winning grid-search hyperparameters into the values the
# Keras model below needs.
best = grid.best_params_

n0 = X_train.shape[1]  # input dimensionality
# Hidden layer widths from the best config, plus a single output unit.
ns = list(best['hidden_layer_sizes']) + [1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [293]:
# Keras functional-API input layer matching the feature count.
input_tensor = Input(shape = (n0,))
In [294]:
# Build the hidden stack layer by layer, each Dense layer consuming the
# previous layer's output.  The activation selected by the grid search is
# mapped to its Keras name instead of being hard-coded to 'tanh' — the
# original ignored the tuned activation (sklearn's 'logistic' is Keras'
# 'sigmoid'; 'relu' and 'tanh' share names).
_keras_act = {'logistic': 'sigmoid', 'relu': 'relu', 'tanh': 'tanh'}
hidden_act = _keras_act[grid.best_params_['activation']]

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=hidden_act)(hidden_outputs[i]))

# Single sigmoid unit for the binary 'chosen' classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [295]:
# Assemble the model and snapshot its freshly initialized weights so the
# same initialization can be restored before training (see set_weights below).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [296]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_11"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_11 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_32 (Dense)             (None, 30)                210       
_________________________________________________________________
dense_33 (Dense)             (None, 1)                 31        
=================================================================
Total params: 241
Trainable params: 241
Non-trainable params: 0
_________________________________________________________________
In [297]:
# Restore the initial weights, then train with Adam at the grid-selected
# learning rate for the grid-selected number of epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): 'lr' is deprecated in newer Keras — confirm version ('learning_rate' preferred)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when val_accuracy stops improving for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 139 samples, validate on 47 samples
Epoch 1/1000
139/139 [==============================] - 0s 1ms/step - loss: 0.7563 - accuracy: 0.5036 - val_loss: 0.6519 - val_accuracy: 0.5745
Epoch 2/1000
139/139 [==============================] - 0s 72us/step - loss: 0.6579 - accuracy: 0.6043 - val_loss: 0.6624 - val_accuracy: 0.5957
Epoch 3/1000
139/139 [==============================] - 0s 65us/step - loss: 0.6098 - accuracy: 0.6906 - val_loss: 0.6754 - val_accuracy: 0.5745
Epoch 4/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5928 - accuracy: 0.7122 - val_loss: 0.6890 - val_accuracy: 0.5532
Epoch 5/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5868 - accuracy: 0.7194 - val_loss: 0.7103 - val_accuracy: 0.5532
Epoch 6/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5831 - accuracy: 0.7194 - val_loss: 0.7250 - val_accuracy: 0.5532
Epoch 7/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5798 - accuracy: 0.7194 - val_loss: 0.7405 - val_accuracy: 0.5532
Epoch 8/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5779 - accuracy: 0.7194 - val_loss: 0.7490 - val_accuracy: 0.5532
Epoch 9/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5762 - accuracy: 0.7194 - val_loss: 0.7536 - val_accuracy: 0.5532
Epoch 10/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5737 - accuracy: 0.7194 - val_loss: 0.7479 - val_accuracy: 0.5532
Epoch 11/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5735 - accuracy: 0.7194 - val_loss: 0.7463 - val_accuracy: 0.5532
Epoch 12/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5715 - accuracy: 0.7266 - val_loss: 0.7447 - val_accuracy: 0.5532

Epoch 00012: ReduceLROnPlateau reducing learning rate to 0.0035000001080334187.
Epoch 13/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5692 - accuracy: 0.7266 - val_loss: 0.7415 - val_accuracy: 0.5532
Epoch 14/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5676 - accuracy: 0.7194 - val_loss: 0.7365 - val_accuracy: 0.5532
Epoch 15/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5665 - accuracy: 0.7194 - val_loss: 0.7268 - val_accuracy: 0.5532
Epoch 16/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5654 - accuracy: 0.7194 - val_loss: 0.7263 - val_accuracy: 0.5532
Epoch 17/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5651 - accuracy: 0.7194 - val_loss: 0.7270 - val_accuracy: 0.5532
Epoch 18/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5623 - accuracy: 0.7194 - val_loss: 0.7290 - val_accuracy: 0.5532
Epoch 19/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5618 - accuracy: 0.7194 - val_loss: 0.7281 - val_accuracy: 0.5532
Epoch 20/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5617 - accuracy: 0.7194 - val_loss: 0.7288 - val_accuracy: 0.5532
Epoch 21/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5598 - accuracy: 0.7194 - val_loss: 0.7290 - val_accuracy: 0.5532
Epoch 22/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5594 - accuracy: 0.7194 - val_loss: 0.7279 - val_accuracy: 0.5532

Epoch 00022: ReduceLROnPlateau reducing learning rate to 0.0017500000540167093.
Epoch 23/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5574 - accuracy: 0.7266 - val_loss: 0.7265 - val_accuracy: 0.5532
Epoch 24/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5566 - accuracy: 0.7266 - val_loss: 0.7245 - val_accuracy: 0.5532
Epoch 25/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5560 - accuracy: 0.7266 - val_loss: 0.7253 - val_accuracy: 0.5532
Epoch 26/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5551 - accuracy: 0.7266 - val_loss: 0.7277 - val_accuracy: 0.5532
Epoch 27/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5548 - accuracy: 0.7266 - val_loss: 0.7293 - val_accuracy: 0.5532
Epoch 28/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5539 - accuracy: 0.7266 - val_loss: 0.7290 - val_accuracy: 0.5532
Epoch 29/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5536 - accuracy: 0.7266 - val_loss: 0.7301 - val_accuracy: 0.5532
Epoch 30/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5523 - accuracy: 0.7266 - val_loss: 0.7310 - val_accuracy: 0.5532
Epoch 31/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5516 - accuracy: 0.7266 - val_loss: 0.7322 - val_accuracy: 0.5532
Epoch 32/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5511 - accuracy: 0.7266 - val_loss: 0.7294 - val_accuracy: 0.5532

Epoch 00032: ReduceLROnPlateau reducing learning rate to 0.0008750000270083547.
Epoch 33/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5504 - accuracy: 0.7266 - val_loss: 0.7294 - val_accuracy: 0.5532
Epoch 34/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5501 - accuracy: 0.7266 - val_loss: 0.7283 - val_accuracy: 0.5532
Epoch 35/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5496 - accuracy: 0.7266 - val_loss: 0.7280 - val_accuracy: 0.5532
Epoch 36/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5495 - accuracy: 0.7266 - val_loss: 0.7291 - val_accuracy: 0.5532
Epoch 37/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5492 - accuracy: 0.7266 - val_loss: 0.7296 - val_accuracy: 0.5532
Epoch 38/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5489 - accuracy: 0.7266 - val_loss: 0.7316 - val_accuracy: 0.5532
Epoch 39/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5489 - accuracy: 0.7266 - val_loss: 0.7325 - val_accuracy: 0.5532
Epoch 40/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5485 - accuracy: 0.7266 - val_loss: 0.7337 - val_accuracy: 0.5532
Epoch 41/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5483 - accuracy: 0.7266 - val_loss: 0.7347 - val_accuracy: 0.5532
Epoch 42/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5478 - accuracy: 0.7266 - val_loss: 0.7333 - val_accuracy: 0.5532

Epoch 00042: ReduceLROnPlateau reducing learning rate to 0.00043750001350417733.
Epoch 43/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5475 - accuracy: 0.7266 - val_loss: 0.7322 - val_accuracy: 0.5532
Epoch 44/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5472 - accuracy: 0.7266 - val_loss: 0.7315 - val_accuracy: 0.5532
Epoch 45/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5474 - accuracy: 0.7266 - val_loss: 0.7302 - val_accuracy: 0.5532
Epoch 46/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5468 - accuracy: 0.7266 - val_loss: 0.7292 - val_accuracy: 0.5532
Epoch 47/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5468 - accuracy: 0.7266 - val_loss: 0.7279 - val_accuracy: 0.5532
Epoch 48/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5466 - accuracy: 0.7266 - val_loss: 0.7272 - val_accuracy: 0.5532
Epoch 49/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5465 - accuracy: 0.7266 - val_loss: 0.7267 - val_accuracy: 0.5532
Epoch 50/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5463 - accuracy: 0.7266 - val_loss: 0.7264 - val_accuracy: 0.5532
Epoch 51/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5462 - accuracy: 0.7266 - val_loss: 0.7260 - val_accuracy: 0.5532
Epoch 52/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5460 - accuracy: 0.7266 - val_loss: 0.7267 - val_accuracy: 0.5532

Epoch 00052: ReduceLROnPlateau reducing learning rate to 0.00021875000675208867.
Epoch 53/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5459 - accuracy: 0.7266 - val_loss: 0.7270 - val_accuracy: 0.5532
Epoch 54/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5458 - accuracy: 0.7266 - val_loss: 0.7271 - val_accuracy: 0.5532
Epoch 55/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5457 - accuracy: 0.7266 - val_loss: 0.7270 - val_accuracy: 0.5532
Epoch 56/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5456 - accuracy: 0.7266 - val_loss: 0.7271 - val_accuracy: 0.5532
Epoch 57/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5456 - accuracy: 0.7266 - val_loss: 0.7273 - val_accuracy: 0.5532
Epoch 58/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5455 - accuracy: 0.7266 - val_loss: 0.7270 - val_accuracy: 0.5532
Epoch 59/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5454 - accuracy: 0.7266 - val_loss: 0.7267 - val_accuracy: 0.5532
Epoch 60/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5453 - accuracy: 0.7266 - val_loss: 0.7261 - val_accuracy: 0.5532
Epoch 61/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5453 - accuracy: 0.7266 - val_loss: 0.7254 - val_accuracy: 0.5532
Epoch 62/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5452 - accuracy: 0.7266 - val_loss: 0.7252 - val_accuracy: 0.5532

Epoch 00062: ReduceLROnPlateau reducing learning rate to 0.00010937500337604433.
Epoch 63/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5451 - accuracy: 0.7266 - val_loss: 0.7252 - val_accuracy: 0.5532
Epoch 64/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5450 - accuracy: 0.7266 - val_loss: 0.7250 - val_accuracy: 0.5532
Epoch 65/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5450 - accuracy: 0.7266 - val_loss: 0.7249 - val_accuracy: 0.5532
Epoch 66/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5450 - accuracy: 0.7266 - val_loss: 0.7249 - val_accuracy: 0.5532
Epoch 67/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5450 - accuracy: 0.7266 - val_loss: 0.7248 - val_accuracy: 0.5532
Epoch 68/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5449 - accuracy: 0.7266 - val_loss: 0.7250 - val_accuracy: 0.5532
Epoch 69/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5449 - accuracy: 0.7266 - val_loss: 0.7254 - val_accuracy: 0.5532
Epoch 70/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5448 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 71/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5448 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 72/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5448 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00072: ReduceLROnPlateau reducing learning rate to 5.4687501688022166e-05.
Epoch 73/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5447 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 74/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5447 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 75/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5447 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 76/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5446 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 77/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5446 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 78/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5446 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 79/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5446 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 80/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5446 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 81/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 82/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532

Epoch 00082: ReduceLROnPlateau reducing learning rate to 2.7343750844011083e-05.
Epoch 83/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 84/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 85/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 86/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 87/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 88/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 89/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5445 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 90/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 91/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 92/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532

Epoch 00092: ReduceLROnPlateau reducing learning rate to 1.3671875422005542e-05.
Epoch 93/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 94/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 95/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 96/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 97/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 98/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7255 - val_accuracy: 0.5532
Epoch 99/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 100/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 101/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 102/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532

Epoch 00102: ReduceLROnPlateau reducing learning rate to 6.835937711002771e-06.
Epoch 103/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 104/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7256 - val_accuracy: 0.5532
Epoch 105/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 106/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 107/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 108/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 109/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5444 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 110/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 111/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 112/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00112: ReduceLROnPlateau reducing learning rate to 3.4179688555013854e-06.
Epoch 113/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 114/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 115/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 116/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 117/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 118/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 119/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 120/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 121/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 122/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00122: ReduceLROnPlateau reducing learning rate to 1.7089844277506927e-06.
Epoch 123/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 124/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 125/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 126/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 127/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 128/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 129/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 130/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 131/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 132/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00132: ReduceLROnPlateau reducing learning rate to 8.544922138753464e-07.
Epoch 133/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 134/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 135/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 136/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 137/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 138/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 139/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 140/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 141/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 142/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00142: ReduceLROnPlateau reducing learning rate to 4.272461069376732e-07.
Epoch 143/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 144/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 145/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 146/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 147/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 148/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 149/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 150/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 151/1000
139/139 [==============================] - 0s 115us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 152/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00152: ReduceLROnPlateau reducing learning rate to 2.136230534688366e-07.
Epoch 153/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 154/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 155/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 156/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 157/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 158/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 159/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 160/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 161/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 162/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00162: ReduceLROnPlateau reducing learning rate to 1.068115267344183e-07.
Epoch 163/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 164/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 165/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 166/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 167/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 168/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 169/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 170/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 171/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 172/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00172: ReduceLROnPlateau reducing learning rate to 5.340576336720915e-08.
Epoch 173/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 174/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 175/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 176/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 177/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 178/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 179/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 180/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 181/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 182/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00182: ReduceLROnPlateau reducing learning rate to 2.6702881683604573e-08.
Epoch 183/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 184/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 185/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 186/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 187/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 188/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 189/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 190/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 191/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 192/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00192: ReduceLROnPlateau reducing learning rate to 1.3351440841802287e-08.
Epoch 193/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 194/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 195/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 196/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 197/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 198/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 199/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 200/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 201/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 202/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00202: ReduceLROnPlateau reducing learning rate to 6.675720420901143e-09.
Epoch 203/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 204/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 205/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 206/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 207/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 208/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 209/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 210/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 211/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 212/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00212: ReduceLROnPlateau reducing learning rate to 3.3378602104505717e-09.
Epoch 213/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 214/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 215/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 216/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 217/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 218/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 219/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 220/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 221/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 222/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00222: ReduceLROnPlateau reducing learning rate to 1.6689301052252858e-09.
Epoch 223/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 224/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 225/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 226/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 227/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 228/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 229/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 230/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 231/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 232/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00232: ReduceLROnPlateau reducing learning rate to 8.344650526126429e-10.
Epoch 233/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 234/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 235/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 236/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 237/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 238/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 239/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 240/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 241/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 242/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00242: ReduceLROnPlateau reducing learning rate to 4.1723252630632146e-10.
Epoch 243/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 244/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 245/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 246/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 247/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 248/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 249/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 250/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 251/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 252/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00252: ReduceLROnPlateau reducing learning rate to 2.0861626315316073e-10.
Epoch 253/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 254/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 255/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 256/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 257/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 258/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 259/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 260/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 261/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 262/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00262: ReduceLROnPlateau reducing learning rate to 1.0430813157658037e-10.
Epoch 263/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 264/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 265/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 266/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 267/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 268/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 269/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 270/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 271/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 272/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00272: ReduceLROnPlateau reducing learning rate to 5.215406578829018e-11.
Epoch 273/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 274/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 275/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 276/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 277/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 278/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 279/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 280/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 281/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 282/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00282: ReduceLROnPlateau reducing learning rate to 2.607703289414509e-11.
Epoch 283/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 284/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 285/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 286/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 287/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 288/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 289/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 290/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 291/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 292/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00292: ReduceLROnPlateau reducing learning rate to 1.3038516447072546e-11.
Epoch 293/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 294/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 295/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 296/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 297/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 298/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 299/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 300/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 301/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 302/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00302: ReduceLROnPlateau reducing learning rate to 6.519258223536273e-12.
Epoch 303/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 304/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 305/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 306/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 307/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 308/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 309/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 310/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 311/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 312/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00312: ReduceLROnPlateau reducing learning rate to 3.2596291117681364e-12.
Epoch 313/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 314/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 315/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 316/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 317/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 318/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 319/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 320/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 321/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 322/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00322: ReduceLROnPlateau reducing learning rate to 1.6298145558840682e-12.
Epoch 323/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 324/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 325/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 326/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 327/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 328/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 329/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 330/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 331/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 332/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00332: ReduceLROnPlateau reducing learning rate to 8.149072779420341e-13.
Epoch 333/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 334/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 335/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 336/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 337/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 338/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 339/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 340/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 341/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 342/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00342: ReduceLROnPlateau reducing learning rate to 4.0745363897101705e-13.
Epoch 343/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 344/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 345/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 346/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 347/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 348/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 349/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 350/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 351/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 352/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00352: ReduceLROnPlateau reducing learning rate to 2.0372681948550853e-13.
Epoch 353/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 354/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 355/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 356/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 357/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 358/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 359/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 360/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 361/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 362/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00362: ReduceLROnPlateau reducing learning rate to 1.0186340974275426e-13.
Epoch 363/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 364/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 365/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 366/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 367/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 368/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 369/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 370/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 371/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 372/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00372: ReduceLROnPlateau reducing learning rate to 5.093170487137713e-14.
Epoch 373/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 374/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 375/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 376/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 377/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 378/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 379/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 380/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 381/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 382/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00382: ReduceLROnPlateau reducing learning rate to 2.5465852435688566e-14.
Epoch 383/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 384/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 385/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 386/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 387/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 388/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 389/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 390/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 391/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 392/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00392: ReduceLROnPlateau reducing learning rate to 1.2732926217844283e-14.
Epoch 393/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 394/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 395/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 396/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 397/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 398/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 399/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 400/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 401/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 402/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00402: ReduceLROnPlateau reducing learning rate to 6.3664631089221414e-15.
Epoch 403/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 404/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 405/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 406/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 407/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 408/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 409/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 410/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 411/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 412/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00412: ReduceLROnPlateau reducing learning rate to 3.1832315544610707e-15.
Epoch 413/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 414/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 415/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 416/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 417/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 418/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 419/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 420/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 421/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 422/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00422: ReduceLROnPlateau reducing learning rate to 1.5916157772305354e-15.
Epoch 423/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 424/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 425/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 426/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 427/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 428/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 429/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 430/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 431/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 432/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00432: ReduceLROnPlateau reducing learning rate to 7.958078886152677e-16.
Epoch 433/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 434/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 435/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 436/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 437/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 438/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 439/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 440/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 441/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 442/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00442: ReduceLROnPlateau reducing learning rate to 3.9790394430763384e-16.
Epoch 443/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 444/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 445/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 446/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 447/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 448/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 449/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 450/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 451/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 452/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00452: ReduceLROnPlateau reducing learning rate to 1.9895197215381692e-16.
Epoch 453/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 454/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 455/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 456/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 457/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 458/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 459/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 460/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 461/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 462/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00462: ReduceLROnPlateau reducing learning rate to 9.947598607690846e-17.
Epoch 463/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 464/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 465/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 466/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 467/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 468/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 469/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 470/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 471/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 472/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00472: ReduceLROnPlateau reducing learning rate to 4.973799303845423e-17.
Epoch 473/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 474/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 475/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 476/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 477/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 478/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 479/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 480/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 481/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 482/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00482: ReduceLROnPlateau reducing learning rate to 2.4868996519227115e-17.
Epoch 483/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 484/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 485/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 486/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 487/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 488/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 489/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 490/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 491/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 492/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00492: ReduceLROnPlateau reducing learning rate to 1.2434498259613557e-17.
Epoch 493/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 494/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 495/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 496/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 497/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 498/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 499/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 500/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 501/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 502/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00502: ReduceLROnPlateau reducing learning rate to 6.217249129806779e-18.
Epoch 503/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 504/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 505/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 506/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 507/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 508/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 509/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 510/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 511/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 512/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00512: ReduceLROnPlateau reducing learning rate to 3.1086245649033894e-18.
Epoch 513/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 514/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 515/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 516/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 517/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 518/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 519/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 520/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 521/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 522/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00522: ReduceLROnPlateau reducing learning rate to 1.5543122824516947e-18.
Epoch 523/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 524/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 525/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 526/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 527/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 528/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 529/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 530/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 531/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 532/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00532: ReduceLROnPlateau reducing learning rate to 7.771561412258473e-19.
Epoch 533/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 534/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 535/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 536/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 537/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 538/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 539/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 540/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 541/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 542/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00542: ReduceLROnPlateau reducing learning rate to 3.8857807061292367e-19.
Epoch 543/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 544/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 545/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 546/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 547/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 548/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 549/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 550/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 551/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 552/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00552: ReduceLROnPlateau reducing learning rate to 1.9428903530646184e-19.
Epoch 553/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 554/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 555/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 556/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 557/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 558/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 559/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 560/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 561/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 562/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00562: ReduceLROnPlateau reducing learning rate to 9.714451765323092e-20.
Epoch 563/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 564/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 565/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 566/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 567/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 568/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 569/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 570/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 571/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 572/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00572: ReduceLROnPlateau reducing learning rate to 4.857225882661546e-20.
Epoch 573/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 574/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 575/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 576/1000
139/139 [==============================] - 0s 122us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 577/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 578/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 579/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 580/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 581/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 582/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00582: ReduceLROnPlateau reducing learning rate to 2.428612941330773e-20.
Epoch 583/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 584/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 585/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 586/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 587/1000
139/139 [==============================] - 0s 115us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 588/1000
139/139 [==============================] - 0s 144us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 589/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 590/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 591/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 592/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00592: ReduceLROnPlateau reducing learning rate to 1.2143064706653865e-20.
Epoch 593/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 594/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 595/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 596/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 597/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 598/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 599/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 600/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 601/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 602/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00602: ReduceLROnPlateau reducing learning rate to 6.071532353326932e-21.
Epoch 603/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 604/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 605/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 606/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 607/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 608/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 609/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 610/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 611/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 612/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00612: ReduceLROnPlateau reducing learning rate to 3.035766176663466e-21.
Epoch 613/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 614/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 615/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 616/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 617/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 618/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 619/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 620/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 621/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 622/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00622: ReduceLROnPlateau reducing learning rate to 1.517883088331733e-21.
Epoch 623/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 624/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 625/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 626/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 627/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 628/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 629/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 630/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 631/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 632/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00632: ReduceLROnPlateau reducing learning rate to 7.589415441658665e-22.
Epoch 633/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 634/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 635/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 636/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 637/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 638/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 639/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 640/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 641/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 642/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00642: ReduceLROnPlateau reducing learning rate to 3.7947077208293327e-22.
Epoch 643/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 644/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 645/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 646/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 647/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 648/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 649/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 650/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 651/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 652/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00652: ReduceLROnPlateau reducing learning rate to 1.8973538604146664e-22.
Epoch 653/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 654/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 655/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 656/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 657/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 658/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 659/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 660/1000
139/139 [==============================] - 0s 122us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 661/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 662/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00662: ReduceLROnPlateau reducing learning rate to 9.486769302073332e-23.
Epoch 663/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 664/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 665/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 666/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 667/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 668/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 669/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 670/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 671/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 672/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00672: ReduceLROnPlateau reducing learning rate to 4.743384651036666e-23.
Epoch 673/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 674/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 675/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 676/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 677/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 678/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 679/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 680/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 681/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 682/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00682: ReduceLROnPlateau reducing learning rate to 2.371692325518333e-23.
Epoch 683/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 684/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 685/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 686/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 687/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 688/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 689/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 690/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 691/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 692/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00692: ReduceLROnPlateau reducing learning rate to 1.1858461627591665e-23.
Epoch 693/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 694/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 695/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 696/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 697/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 698/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 699/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 700/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 701/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 702/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00702: ReduceLROnPlateau reducing learning rate to 5.9292308137958324e-24.
Epoch 703/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 704/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 705/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 706/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 707/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 708/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 709/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 710/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 711/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 712/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00712: ReduceLROnPlateau reducing learning rate to 2.9646154068979162e-24.
Epoch 713/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 714/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 715/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 716/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 717/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 718/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 719/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 720/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 721/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 722/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00722: ReduceLROnPlateau reducing learning rate to 1.4823077034489581e-24.
Epoch 723/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 724/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 725/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 726/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 727/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 728/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 729/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 730/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 731/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 732/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00732: ReduceLROnPlateau reducing learning rate to 7.4115385172447905e-25.
Epoch 733/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 734/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 735/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 736/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 737/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 738/1000
139/139 [==============================] - 0s 252us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 739/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 740/1000
139/139 [==============================] - 0s 309us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 741/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 742/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00742: ReduceLROnPlateau reducing learning rate to 3.7057692586223952e-25.
Epoch 743/1000
139/139 [==============================] - 0s 180us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 744/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 745/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 746/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 747/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 748/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 749/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 750/1000
139/139 [==============================] - 0s 187us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 751/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 752/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00752: ReduceLROnPlateau reducing learning rate to 1.8528846293111976e-25.
Epoch 753/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 754/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 755/1000
139/139 [==============================] - 0s 137us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 756/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 757/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 758/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 759/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 760/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 761/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 762/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00762: ReduceLROnPlateau reducing learning rate to 9.264423146555988e-26.
Epoch 763/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 764/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 765/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 766/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 767/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 768/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 769/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 770/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 771/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 772/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00772: ReduceLROnPlateau reducing learning rate to 4.632211573277994e-26.
Epoch 773/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 774/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 775/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 776/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 777/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 778/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 779/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 780/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 781/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 782/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00782: ReduceLROnPlateau reducing learning rate to 2.316105786638997e-26.
Epoch 783/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 784/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 785/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 786/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 787/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 788/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 789/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 790/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 791/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 792/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00792: ReduceLROnPlateau reducing learning rate to 1.1580528933194985e-26.
Epoch 793/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 794/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 795/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 796/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 797/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 798/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 799/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 800/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 801/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 802/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00802: ReduceLROnPlateau reducing learning rate to 5.7902644665974926e-27.
Epoch 803/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 804/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 805/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 806/1000
139/139 [==============================] - 0s 58us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 807/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 808/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 809/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 810/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 811/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 812/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00812: ReduceLROnPlateau reducing learning rate to 2.8951322332987463e-27.
Epoch 813/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 814/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 815/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 816/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 817/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 818/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 819/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 820/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 821/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 822/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00822: ReduceLROnPlateau reducing learning rate to 1.4475661166493731e-27.
Epoch 823/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 824/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 825/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 826/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 827/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 828/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 829/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 830/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 831/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 832/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00832: ReduceLROnPlateau reducing learning rate to 7.237830583246866e-28.
Epoch 833/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 834/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 835/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 836/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 837/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 838/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 839/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 840/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 841/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 842/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00842: ReduceLROnPlateau reducing learning rate to 3.618915291623433e-28.
Epoch 843/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 844/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 845/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 846/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 847/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 848/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 849/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 850/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 851/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 852/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00852: ReduceLROnPlateau reducing learning rate to 1.8094576458117164e-28.
Epoch 853/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 854/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 855/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 856/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 857/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 858/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 859/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 860/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 861/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 862/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00862: ReduceLROnPlateau reducing learning rate to 9.047288229058582e-29.
Epoch 863/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 864/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 865/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 866/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 867/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 868/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 869/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 870/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 871/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 872/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00872: ReduceLROnPlateau reducing learning rate to 4.523644114529291e-29.
Epoch 873/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 874/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 875/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 876/1000
139/139 [==============================] - 0s 65us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 877/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 878/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 879/1000
139/139 [==============================] - 0s 108us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 880/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 881/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 882/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00882: ReduceLROnPlateau reducing learning rate to 2.2618220572646455e-29.
Epoch 883/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 884/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 885/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 886/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 887/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 888/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 889/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 890/1000
139/139 [==============================] - 0s 115us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 891/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 892/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00892: ReduceLROnPlateau reducing learning rate to 1.1309110286323228e-29.
Epoch 893/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 894/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 895/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 896/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 897/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 898/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 899/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 900/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 901/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 902/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00902: ReduceLROnPlateau reducing learning rate to 5.654555143161614e-30.
Epoch 903/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 904/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 905/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 906/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 907/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 908/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 909/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 910/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 911/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 912/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00912: ReduceLROnPlateau reducing learning rate to 2.827277571580807e-30.
Epoch 913/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 914/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 915/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 916/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 917/1000
139/139 [==============================] - 0s 122us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 918/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 919/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 920/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 921/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 922/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00922: ReduceLROnPlateau reducing learning rate to 1.4136387857904035e-30.
Epoch 923/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 924/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 925/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 926/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 927/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 928/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 929/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 930/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 931/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 932/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00932: ReduceLROnPlateau reducing learning rate to 7.068193928952017e-31.
Epoch 933/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 934/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 935/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 936/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 937/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 938/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 939/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 940/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 941/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 942/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00942: ReduceLROnPlateau reducing learning rate to 3.5340969644760086e-31.
Epoch 943/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 944/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 945/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 946/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 947/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 948/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 949/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 950/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 951/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 952/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00952: ReduceLROnPlateau reducing learning rate to 1.7670484822380043e-31.
Epoch 953/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 954/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 955/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 956/1000
139/139 [==============================] - 0s 129us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 957/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 958/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 959/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 960/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 961/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 962/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00962: ReduceLROnPlateau reducing learning rate to 8.835242411190022e-32.
Epoch 963/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 964/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 965/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 966/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 967/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 968/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 969/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 970/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 971/1000
139/139 [==============================] - 0s 72us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 972/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00972: ReduceLROnPlateau reducing learning rate to 4.417621205595011e-32.
Epoch 973/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 974/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 975/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 976/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 977/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 978/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 979/1000
139/139 [==============================] - 0s 115us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 980/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 981/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 982/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00982: ReduceLROnPlateau reducing learning rate to 2.2088106027975054e-32.
Epoch 983/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 984/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 985/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 986/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 987/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 988/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 989/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 990/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 991/1000
139/139 [==============================] - 0s 101us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 992/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532

Epoch 00992: ReduceLROnPlateau reducing learning rate to 1.1044053013987527e-32.
Epoch 993/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 994/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 995/1000
139/139 [==============================] - 0s 93us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 996/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 997/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 998/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 999/1000
139/139 [==============================] - 0s 79us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
Epoch 1000/1000
139/139 [==============================] - 0s 86us/step - loss: 0.5443 - accuracy: 0.7266 - val_loss: 0.7257 - val_accuracy: 0.5532
In [298]:
# Plot training curves from the Keras History object.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))
# (removed leftover debug print(epochs) — it only emitted "range(0, 1000)")

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()
range(0, 1000)
In [299]:
# Held-out evaluation of the trained Keras model on the test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
47/47 [==============================] - 0s 42us/step
test loss: 0.7256842750184079, test accuracy: 0.5531914830207825
In [300]:
# ROC AUC on the raw sigmoid scores (no 0.5 threshold applied yet).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.6501831501831502
In [301]:
# Binarize the sigmoid scores at 0.5, then measure agreement with Cohen's kappa.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.0

KMeans clustering of the tonal-centroid features

In [302]:
X
Out[302]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.347152 0.215026 0.572244 1.090273 0.291013 0.262601
1 0.820325 -1.142202 -0.584840 -1.206404 0.498688 -0.437268
2 -0.368979 -1.101005 1.316826 1.007755 -0.300511 -1.688750
3 0.137511 -1.462891 1.217279 0.145184 -0.233709 -0.483200
4 0.067094 -1.364744 1.323658 -0.374292 -0.763515 -1.012955
5 -0.308205 1.554912 0.177715 -0.789365 0.203522 -0.138956
6 -0.229900 1.130218 -0.432863 -0.017215 -0.028738 0.374753
7 0.128919 -2.154466 -1.473142 -0.706391 0.582734 -1.413063
8 -0.428809 1.078779 0.122401 0.073561 1.095684 0.487808
9 0.508739 -1.275256 0.146667 0.082460 0.735466 -0.812000
10 0.236456 -1.307369 -0.087549 -1.929049 -0.896450 -0.085837
11 -0.332097 -2.136115 -0.379978 0.106336 0.131067 -0.494191
12 0.878776 -0.818718 -0.318688 -0.206650 -0.135265 -1.505040
13 -0.503536 -1.372498 -0.663743 -0.059237 0.168824 0.591813
14 -0.225399 -2.333299 -0.977473 -0.503168 0.734560 -0.229386
15 1.637443 0.565792 1.742275 -0.855040 0.086757 -1.781283
16 1.593976 -0.388711 -1.659037 0.097761 0.429256 2.040221
17 1.453127 -0.044197 0.718333 0.786724 -0.814130 -0.463517
18 0.574197 -0.311820 -1.315952 0.601889 -0.114239 1.343490
19 1.285525 0.753218 -0.678607 1.972188 0.383330 1.855540
20 -0.420448 -0.248495 0.126628 -0.407281 0.328979 0.844612
21 0.782508 -0.432480 -1.645501 -0.097137 -0.510175 0.780762
22 1.220720 0.555439 1.643752 1.016671 0.036438 -0.204417
23 1.276232 -0.970408 -2.218229 -1.985819 0.181550 0.875360
24 0.330249 0.500340 0.945837 0.170144 -1.951811 0.728661
25 -0.457742 1.246710 1.511768 -0.330001 -0.839662 0.686756
26 -0.128012 0.619401 1.421912 -0.281963 -2.498478 0.602396
27 1.567570 -0.230001 1.237739 0.280013 0.089273 -0.372351
28 -0.343287 -1.213337 1.470523 -0.507041 -1.296801 -0.318921
29 -1.776654 -1.197082 0.552844 -0.390964 -0.822647 -0.655686
... ... ... ... ... ... ...
156 1.136603 1.061921 -1.369527 0.349165 -0.306267 1.859464
157 -0.957141 0.412565 -0.099705 0.507263 0.380704 -1.450660
158 0.627291 -0.982430 0.247369 -0.335773 -0.880954 1.943513
159 0.605582 -0.713493 -0.033525 0.066891 -0.330645 0.762021
160 0.435082 -1.765650 -0.206944 -2.176473 -1.106134 1.293098
161 -0.869679 -0.284973 -0.185255 1.340517 -1.710248 -1.319612
162 1.695010 0.478353 -2.356288 0.126103 -0.667751 -0.738760
163 1.201750 -0.589959 -0.583958 1.265372 2.093314 0.102967
164 0.602223 1.408737 0.093544 -1.913840 0.673872 -1.282054
165 1.273699 0.835188 0.238069 -0.918903 0.953651 0.759154
166 0.910091 0.783406 -0.282440 -0.757994 -0.153400 0.541408
167 0.784319 -0.628463 0.531487 -0.758651 -0.436559 0.237206
168 0.597174 -0.260556 0.776122 0.565709 -0.698971 0.850620
169 0.512245 -0.906993 0.867897 0.635716 -0.539661 0.041101
170 0.766694 0.045665 1.652744 -0.681039 -2.002121 -0.208060
171 0.729920 -0.012315 1.424850 -1.133758 -1.844432 -0.865224
172 -0.250595 1.333637 0.735398 -2.185661 -0.923218 -0.293878
173 0.239742 1.083681 -0.966488 -0.543890 -0.015042 0.298120
174 0.318041 0.919114 -0.293494 -1.777415 -0.452843 0.237000
175 -0.249524 -1.310616 -1.629695 0.599616 1.389241 0.190145
176 -0.239498 -0.884659 -1.273002 0.946824 0.600415 -0.458834
177 -0.247995 -0.379289 0.038455 -0.181334 0.555713 -0.343901
178 -1.538412 0.875654 -1.387902 -0.180395 0.008275 -0.438000
179 -0.858788 -0.062931 -0.178983 -0.593047 1.845652 -1.559059
180 0.044348 0.420883 -0.678748 1.238163 1.329929 -0.157668
181 1.376463 -1.287646 0.773584 1.157342 -0.286084 -1.054513
182 0.948949 -1.179621 1.457273 1.080025 1.950553 0.140886
183 1.235651 -0.462711 0.631757 -1.169777 -0.693590 0.281712
184 0.668597 1.124688 -1.524719 0.905841 -1.384876 0.539531
185 -0.966564 0.725873 0.168576 -0.106983 -0.071778 -0.257675

186 rows × 6 columns

In [303]:
# Within-cluster sum of squares (inertia) for K = 1..14, for the elbow method.
WSSs = []
for n_clust in range(1, 15):
    km = KMeans(n_clusters=n_clust, random_state=0).fit(X)
    WSSs.append(km.inertia_)
WSSs
Out[303]:
[1116.0000000000002,
 927.5012922756229,
 805.362167602635,
 709.3398851775523,
 644.8353925193064,
 590.0040583169643,
 547.564638663471,
 517.2051050812995,
 492.673077864197,
 467.91458811999206,
 449.82839909018185,
 426.939832241735,
 408.8927579760986,
 392.386338225884]
In [304]:
# Elbow plot: within-cluster sum of squares vs. number of clusters.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.xlabel('Number of clusters (K)')
plt.ylabel('Within-cluster sum of squares')
plt.title('Elbow method for choosing K')
plt.show()
Out[304]:
[<matplotlib.lines.Line2D at 0x1e8302657f0>]

K = 4 (chosen from the elbow in the WSS plot above)

In [305]:
# Final clustering of the tonal-centroid features with the elbow-selected K = 4.
kmeans_tc = KMeans(n_clusters=4, n_init=10, random_state=0)
kmeans_tc.fit(X)
Out[305]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [306]:
# Cluster index (0-3) assigned to each track during fitting, in row order.
kmeans_tc.labels_
Out[306]:
array([1, 2, 2, 2, 2, 3, 1, 2, 3, 2, 2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 1, 1,
       2, 1, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 1, 1, 3, 0, 0, 2, 2, 2, 1, 1,
       1, 0, 3, 3, 3, 0, 3, 3, 1, 1, 1, 0, 3, 0, 1, 1, 1, 1, 2, 2, 1, 0,
       2, 0, 0, 3, 3, 0, 0, 1, 1, 0, 3, 3, 1, 2, 2, 2, 2, 2, 1, 2, 3, 3,
       0, 0, 0, 2, 1, 1, 3, 3, 3, 0, 2, 0, 3, 3, 3, 1, 2, 0, 3, 0, 1, 3,
       2, 0, 0, 3, 0, 0, 0, 0, 2, 0, 2, 2, 3, 2, 2, 1, 2, 3, 2, 1, 2, 1,
       1, 1, 3, 3, 1, 3, 1, 0, 3, 1, 3, 2, 3, 3, 3, 3, 0, 0, 0, 3, 0, 3,
       1, 2, 1, 3, 1, 1, 2, 0, 1, 1, 3, 1, 1, 2, 2, 2, 2, 2, 3, 1, 1, 3,
       3, 3, 3, 3, 3, 2, 2, 2, 1, 3])
In [307]:
# NOTE(review): predicting on the same data the model was fitted on should
# reproduce kmeans_tc.labels_ (the outputs above are identical) — this
# recomputation looks redundant; confirm before simplifying.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[307]:
array([1, 2, 2, 2, 2, 3, 1, 2, 3, 2, 2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 1, 1,
       2, 1, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 1, 1, 3, 0, 0, 2, 2, 2, 1, 1,
       1, 0, 3, 3, 3, 0, 3, 3, 1, 1, 1, 0, 3, 0, 1, 1, 1, 1, 2, 2, 1, 0,
       2, 0, 0, 3, 3, 0, 0, 1, 1, 0, 3, 3, 1, 2, 2, 2, 2, 2, 1, 2, 3, 3,
       0, 0, 0, 2, 1, 1, 3, 3, 3, 0, 2, 0, 3, 3, 3, 1, 2, 0, 3, 0, 1, 3,
       2, 0, 0, 3, 0, 0, 0, 0, 2, 0, 2, 2, 3, 2, 2, 1, 2, 3, 2, 1, 2, 1,
       1, 1, 3, 3, 1, 3, 1, 0, 3, 1, 3, 2, 3, 3, 3, 3, 0, 0, 0, 3, 0, 3,
       1, 2, 1, 3, 1, 1, 2, 0, 1, 1, 3, 1, 1, 2, 2, 2, 2, 2, 3, 1, 1, 3,
       3, 3, 3, 3, 3, 2, 2, 2, 1, 3])
In [308]:
# Attach the cluster assignment and the target label to the feature frame.
# .assign returns a new frame (rebinding X), which avoids SettingWithCopy
# hazards from mutating a frame that may be a slice of df_n_ps_std_tc,
# while downstream cells still see the same columns.
X = X.assign(Cluster=clusters_tc, chosen=list(y))
In [309]:
X
Out[309]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.347152 0.215026 0.572244 1.090273 0.291013 0.262601 1 0
1 0.820325 -1.142202 -0.584840 -1.206404 0.498688 -0.437268 2 0
2 -0.368979 -1.101005 1.316826 1.007755 -0.300511 -1.688750 2 0
3 0.137511 -1.462891 1.217279 0.145184 -0.233709 -0.483200 2 0
4 0.067094 -1.364744 1.323658 -0.374292 -0.763515 -1.012955 2 0
5 -0.308205 1.554912 0.177715 -0.789365 0.203522 -0.138956 3 0
6 -0.229900 1.130218 -0.432863 -0.017215 -0.028738 0.374753 1 0
7 0.128919 -2.154466 -1.473142 -0.706391 0.582734 -1.413063 2 0
8 -0.428809 1.078779 0.122401 0.073561 1.095684 0.487808 3 0
9 0.508739 -1.275256 0.146667 0.082460 0.735466 -0.812000 2 0
10 0.236456 -1.307369 -0.087549 -1.929049 -0.896450 -0.085837 2 0
11 -0.332097 -2.136115 -0.379978 0.106336 0.131067 -0.494191 2 0
12 0.878776 -0.818718 -0.318688 -0.206650 -0.135265 -1.505040 2 0
13 -0.503536 -1.372498 -0.663743 -0.059237 0.168824 0.591813 1 0
14 -0.225399 -2.333299 -0.977473 -0.503168 0.734560 -0.229386 2 0
15 1.637443 0.565792 1.742275 -0.855040 0.086757 -1.781283 2 0
16 1.593976 -0.388711 -1.659037 0.097761 0.429256 2.040221 1 0
17 1.453127 -0.044197 0.718333 0.786724 -0.814130 -0.463517 2 0
18 0.574197 -0.311820 -1.315952 0.601889 -0.114239 1.343490 1 0
19 1.285525 0.753218 -0.678607 1.972188 0.383330 1.855540 1 0
20 -0.420448 -0.248495 0.126628 -0.407281 0.328979 0.844612 1 0
21 0.782508 -0.432480 -1.645501 -0.097137 -0.510175 0.780762 1 0
22 1.220720 0.555439 1.643752 1.016671 0.036438 -0.204417 2 0
23 1.276232 -0.970408 -2.218229 -1.985819 0.181550 0.875360 1 0
24 0.330249 0.500340 0.945837 0.170144 -1.951811 0.728661 0 0
25 -0.457742 1.246710 1.511768 -0.330001 -0.839662 0.686756 0 0
26 -0.128012 0.619401 1.421912 -0.281963 -2.498478 0.602396 0 0
27 1.567570 -0.230001 1.237739 0.280013 0.089273 -0.372351 2 0
28 -0.343287 -1.213337 1.470523 -0.507041 -1.296801 -0.318921 2 0
29 -1.776654 -1.197082 0.552844 -0.390964 -0.822647 -0.655686 0 0
... ... ... ... ... ... ... ... ...
156 1.136603 1.061921 -1.369527 0.349165 -0.306267 1.859464 1 1
157 -0.957141 0.412565 -0.099705 0.507263 0.380704 -1.450660 3 1
158 0.627291 -0.982430 0.247369 -0.335773 -0.880954 1.943513 1 1
159 0.605582 -0.713493 -0.033525 0.066891 -0.330645 0.762021 1 1
160 0.435082 -1.765650 -0.206944 -2.176473 -1.106134 1.293098 2 1
161 -0.869679 -0.284973 -0.185255 1.340517 -1.710248 -1.319612 0 1
162 1.695010 0.478353 -2.356288 0.126103 -0.667751 -0.738760 1 1
163 1.201750 -0.589959 -0.583958 1.265372 2.093314 0.102967 1 1
164 0.602223 1.408737 0.093544 -1.913840 0.673872 -1.282054 3 1
165 1.273699 0.835188 0.238069 -0.918903 0.953651 0.759154 1 1
166 0.910091 0.783406 -0.282440 -0.757994 -0.153400 0.541408 1 1
167 0.784319 -0.628463 0.531487 -0.758651 -0.436559 0.237206 2 1
168 0.597174 -0.260556 0.776122 0.565709 -0.698971 0.850620 2 1
169 0.512245 -0.906993 0.867897 0.635716 -0.539661 0.041101 2 1
170 0.766694 0.045665 1.652744 -0.681039 -2.002121 -0.208060 2 1
171 0.729920 -0.012315 1.424850 -1.133758 -1.844432 -0.865224 2 1
172 -0.250595 1.333637 0.735398 -2.185661 -0.923218 -0.293878 3 1
173 0.239742 1.083681 -0.966488 -0.543890 -0.015042 0.298120 1 1
174 0.318041 0.919114 -0.293494 -1.777415 -0.452843 0.237000 1 1
175 -0.249524 -1.310616 -1.629695 0.599616 1.389241 0.190145 3 1
176 -0.239498 -0.884659 -1.273002 0.946824 0.600415 -0.458834 3 1
177 -0.247995 -0.379289 0.038455 -0.181334 0.555713 -0.343901 3 1
178 -1.538412 0.875654 -1.387902 -0.180395 0.008275 -0.438000 3 1
179 -0.858788 -0.062931 -0.178983 -0.593047 1.845652 -1.559059 3 1
180 0.044348 0.420883 -0.678748 1.238163 1.329929 -0.157668 3 1
181 1.376463 -1.287646 0.773584 1.157342 -0.286084 -1.054513 2 1
182 0.948949 -1.179621 1.457273 1.080025 1.950553 0.140886 2 1
183 1.235651 -0.462711 0.631757 -1.169777 -0.693590 0.281712 2 1
184 0.668597 1.124688 -1.524719 0.905841 -1.384876 0.539531 1 1
185 -0.966564 0.725873 0.168576 -0.106983 -0.071778 -0.257675 3 1

186 rows × 8 columns

In [310]:
# Cluster composition split by the binary 'chosen' label, as a stacked bar chart.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
ax = pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
# Label the figure so it stands alone when the notebook is skimmed.
ax.set_xlabel('Cluster')
ax.set_ylabel('Number of tracks')
ax.set_title('Chosen (1) vs. not chosen (0) tracks per cluster')
ax
Out[310]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e8302a0dd8>
In [130]:
# Render the current company name as a level-2 markdown heading.
from IPython.display import display, Markdown, Latex
heading = Markdown('## ' + companies[4])
display(heading)

Specialized

ANN

In [311]:
# Standardized tonal-centroid features for company index 4 (Specialized,
# per the heading above) — TODO confirm index/company mapping.
X = df_n_ps_std_tc[4]
In [312]:
# Binary target: whether the track was chosen.
y = df_n_ps[4]['chosen']
In [313]:
# NOTE(review): no random_state here, so the split (and every result below)
# changes on each re-run — consider train_test_split(X, y, random_state=...)
# for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [314]:
# Sanity check: 164 training rows x 6 tonal-centroid features.
X_train.shape
Out[314]:
(164, 6)
In [315]:
# Base estimator for the grid search; hidden_layer_sizes here is only a
# placeholder — the grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [316]:
# Hyperparameter search space for the MLPClassifier grid search.
activation_vec = ['logistic', 'relu', 'tanh']
# Treated as an epoch budget for the sklearn MLP.
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# One to three hidden layers of varying widths.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the grid below (kept for a possible larger search).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [317]:
import time
start = time.time()  # wall-clock start, to report total tuning time at the end

np.random.seed(1234)  # seed NumPy's global RNG for reproducibility
# Parameter grid for the search; batch_size was deliberately left out
# (commented below) — presumably to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and plain accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid=True was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this cell requires an older scikit-learn version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [318]:
grid.fit(X_train, y_train)

# Report the winning hyper-parameters together with their cross-validated
# accuracy and the kappa of that same best model (messages are in Spanish).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # wall-clock end, after the search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.006, 'max_iter': 400}, que permiten obtener un Accuracy de 69.51% y un Kappa del 38.98
Tiempo total: 23.88 minutos
In [319]:
# Number of input features (6 for this company).
n0 = X_train.shape[1]

# Rebuild the tuned architecture for Keras: the hidden-layer widths chosen by
# the grid search plus a final single-unit output layer.
# list() replaces the original element-by-element copy loop over the tuple.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

# Tuned optimizer settings, reused when compiling/fitting the Keras model.
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [320]:
# Keras functional-API input layer sized to the feature count.
input_tensor = Input(shape = (n0,))
In [321]:
# Build the hidden layers using the activation the grid search actually
# selected. The original hardcoded 'tanh' here even though the search was run
# over activations and reported 'relu' as best — so the Keras rebuild
# contradicted the tuned MLPClassifier.
best_activation = grid.best_params_['activation']

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=best_activation)(hidden_outputs[i]))

# Binary classification head: single sigmoid unit.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [322]:
# Assemble the functional model and snapshot its freshly initialized weights,
# so training can later be restarted from this exact starting point (the fit
# cell calls model.set_weights(weights) first).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [323]:
# Architecture overview: 6 -> 30 -> 20 -> 10 -> 1, 1,051 trainable parameters.
model.summary()
Model: "model_12"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_12 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_34 (Dense)             (None, 30)                210       
_________________________________________________________________
dense_35 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_36 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_37 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,051
Trainable params: 1,051
Non-trainable params: 0
_________________________________________________________________
In [324]:
# Restore the saved initial weights so re-running this cell always trains from
# the same starting point, then compile with the tuned learning rate.
model.set_weights(weights)
# NOTE(review): `lr` is the legacy keyword; newer Keras uses `learning_rate`.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever validation accuracy fails to improve by at
# least 0.01 for 10 consecutive epochs.
# NOTE(review): validation_data is the held-out TEST set, and the LR schedule
# reacts to it — this leaks test information into training; consider carving a
# separate validation split out of the training data.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 164 samples, validate on 55 samples
Epoch 1/400
164/164 [==============================] - 0s 1ms/step - loss: 0.6993 - accuracy: 0.5427 - val_loss: 0.6692 - val_accuracy: 0.5273
Epoch 2/400
164/164 [==============================] - 0s 79us/step - loss: 0.6715 - accuracy: 0.5915 - val_loss: 0.6762 - val_accuracy: 0.6000
Epoch 3/400
164/164 [==============================] - 0s 79us/step - loss: 0.6623 - accuracy: 0.6524 - val_loss: 0.6776 - val_accuracy: 0.5818
Epoch 4/400
164/164 [==============================] - 0s 73us/step - loss: 0.6537 - accuracy: 0.6524 - val_loss: 0.6679 - val_accuracy: 0.5636
Epoch 5/400
164/164 [==============================] - 0s 79us/step - loss: 0.6466 - accuracy: 0.6280 - val_loss: 0.6403 - val_accuracy: 0.6000
Epoch 6/400
164/164 [==============================] - 0s 67us/step - loss: 0.6318 - accuracy: 0.6220 - val_loss: 0.6412 - val_accuracy: 0.6727
Epoch 7/400
164/164 [==============================] - 0s 73us/step - loss: 0.6263 - accuracy: 0.6280 - val_loss: 0.6400 - val_accuracy: 0.6727
Epoch 8/400
164/164 [==============================] - 0s 79us/step - loss: 0.6172 - accuracy: 0.6646 - val_loss: 0.6310 - val_accuracy: 0.6182
Epoch 9/400
164/164 [==============================] - 0s 79us/step - loss: 0.6082 - accuracy: 0.6829 - val_loss: 0.6228 - val_accuracy: 0.6545
Epoch 10/400
164/164 [==============================] - 0s 73us/step - loss: 0.6042 - accuracy: 0.6707 - val_loss: 0.6159 - val_accuracy: 0.6364
Epoch 11/400
164/164 [==============================] - 0s 73us/step - loss: 0.5958 - accuracy: 0.7012 - val_loss: 0.6233 - val_accuracy: 0.6727
Epoch 12/400
164/164 [==============================] - 0s 79us/step - loss: 0.5875 - accuracy: 0.7134 - val_loss: 0.6460 - val_accuracy: 0.6545
Epoch 13/400
164/164 [==============================] - 0s 79us/step - loss: 0.5821 - accuracy: 0.7012 - val_loss: 0.6505 - val_accuracy: 0.6909
Epoch 14/400
164/164 [==============================] - 0s 67us/step - loss: 0.5733 - accuracy: 0.7012 - val_loss: 0.6551 - val_accuracy: 0.6182
Epoch 15/400
164/164 [==============================] - 0s 85us/step - loss: 0.5678 - accuracy: 0.6951 - val_loss: 0.6350 - val_accuracy: 0.6545
Epoch 16/400
164/164 [==============================] - 0s 73us/step - loss: 0.5555 - accuracy: 0.7317 - val_loss: 0.6191 - val_accuracy: 0.6727
Epoch 17/400
164/164 [==============================] - 0s 73us/step - loss: 0.5577 - accuracy: 0.6951 - val_loss: 0.6126 - val_accuracy: 0.6909
Epoch 18/400
164/164 [==============================] - 0s 85us/step - loss: 0.5450 - accuracy: 0.7134 - val_loss: 0.6076 - val_accuracy: 0.7091
Epoch 19/400
164/164 [==============================] - 0s 79us/step - loss: 0.5379 - accuracy: 0.7195 - val_loss: 0.6199 - val_accuracy: 0.6182
Epoch 20/400
164/164 [==============================] - 0s 79us/step - loss: 0.5352 - accuracy: 0.7134 - val_loss: 0.6304 - val_accuracy: 0.6182
Epoch 21/400
164/164 [==============================] - 0s 79us/step - loss: 0.5252 - accuracy: 0.6829 - val_loss: 0.6354 - val_accuracy: 0.6000
Epoch 22/400
164/164 [==============================] - 0s 79us/step - loss: 0.5109 - accuracy: 0.7500 - val_loss: 0.6131 - val_accuracy: 0.6727
Epoch 23/400
164/164 [==============================] - 0s 85us/step - loss: 0.5071 - accuracy: 0.7195 - val_loss: 0.6194 - val_accuracy: 0.6545
Epoch 24/400
164/164 [==============================] - 0s 85us/step - loss: 0.4921 - accuracy: 0.7500 - val_loss: 0.6385 - val_accuracy: 0.6909
Epoch 25/400
164/164 [==============================] - 0s 79us/step - loss: 0.4844 - accuracy: 0.7744 - val_loss: 0.6484 - val_accuracy: 0.7273
Epoch 26/400
164/164 [==============================] - 0s 79us/step - loss: 0.4813 - accuracy: 0.7744 - val_loss: 0.6422 - val_accuracy: 0.6909
Epoch 27/400
164/164 [==============================] - 0s 79us/step - loss: 0.4806 - accuracy: 0.7500 - val_loss: 0.6699 - val_accuracy: 0.6727
Epoch 28/400
164/164 [==============================] - 0s 79us/step - loss: 0.4638 - accuracy: 0.7622 - val_loss: 0.6358 - val_accuracy: 0.6364
Epoch 29/400
164/164 [==============================] - 0s 91us/step - loss: 0.4543 - accuracy: 0.8049 - val_loss: 0.6213 - val_accuracy: 0.6545
Epoch 30/400
164/164 [==============================] - 0s 98us/step - loss: 0.4385 - accuracy: 0.7988 - val_loss: 0.6106 - val_accuracy: 0.6545
Epoch 31/400
164/164 [==============================] - 0s 85us/step - loss: 0.4292 - accuracy: 0.8293 - val_loss: 0.6209 - val_accuracy: 0.6727
Epoch 32/400
164/164 [==============================] - 0s 79us/step - loss: 0.4208 - accuracy: 0.8415 - val_loss: 0.6121 - val_accuracy: 0.6727
Epoch 33/400
164/164 [==============================] - 0s 79us/step - loss: 0.3988 - accuracy: 0.8354 - val_loss: 0.6432 - val_accuracy: 0.6364
Epoch 34/400
164/164 [==============================] - 0s 73us/step - loss: 0.4050 - accuracy: 0.8110 - val_loss: 0.6197 - val_accuracy: 0.6909
Epoch 35/400
164/164 [==============================] - 0s 79us/step - loss: 0.3873 - accuracy: 0.8598 - val_loss: 0.6285 - val_accuracy: 0.6727

Epoch 00035: ReduceLROnPlateau reducing learning rate to 0.003000000026077032.
Epoch 36/400
164/164 [==============================] - 0s 79us/step - loss: 0.3762 - accuracy: 0.8659 - val_loss: 0.6277 - val_accuracy: 0.6909
Epoch 37/400
164/164 [==============================] - 0s 79us/step - loss: 0.3631 - accuracy: 0.8537 - val_loss: 0.6256 - val_accuracy: 0.6545
Epoch 38/400
164/164 [==============================] - 0s 73us/step - loss: 0.3580 - accuracy: 0.8598 - val_loss: 0.6388 - val_accuracy: 0.6727
Epoch 39/400
164/164 [==============================] - 0s 79us/step - loss: 0.3529 - accuracy: 0.8537 - val_loss: 0.6630 - val_accuracy: 0.6727
Epoch 40/400
164/164 [==============================] - 0s 79us/step - loss: 0.3492 - accuracy: 0.8598 - val_loss: 0.6597 - val_accuracy: 0.6364
Epoch 41/400
164/164 [==============================] - 0s 79us/step - loss: 0.3439 - accuracy: 0.8598 - val_loss: 0.6422 - val_accuracy: 0.6364
Epoch 42/400
164/164 [==============================] - 0s 79us/step - loss: 0.3335 - accuracy: 0.8841 - val_loss: 0.6361 - val_accuracy: 0.6545
Epoch 43/400
164/164 [==============================] - 0s 85us/step - loss: 0.3261 - accuracy: 0.8841 - val_loss: 0.6305 - val_accuracy: 0.7091
Epoch 44/400
164/164 [==============================] - 0s 79us/step - loss: 0.3217 - accuracy: 0.8720 - val_loss: 0.6430 - val_accuracy: 0.7091
Epoch 45/400
164/164 [==============================] - 0s 73us/step - loss: 0.3212 - accuracy: 0.8841 - val_loss: 0.6524 - val_accuracy: 0.6727

Epoch 00045: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 46/400
164/164 [==============================] - 0s 73us/step - loss: 0.3156 - accuracy: 0.8841 - val_loss: 0.6459 - val_accuracy: 0.7091
Epoch 47/400
164/164 [==============================] - 0s 73us/step - loss: 0.3106 - accuracy: 0.8841 - val_loss: 0.6329 - val_accuracy: 0.7091
Epoch 48/400
164/164 [==============================] - 0s 79us/step - loss: 0.3084 - accuracy: 0.8963 - val_loss: 0.6234 - val_accuracy: 0.7091
Epoch 49/400
164/164 [==============================] - 0s 73us/step - loss: 0.3077 - accuracy: 0.8902 - val_loss: 0.6225 - val_accuracy: 0.7091
Epoch 50/400
164/164 [==============================] - 0s 79us/step - loss: 0.3070 - accuracy: 0.9085 - val_loss: 0.6199 - val_accuracy: 0.6727
Epoch 51/400
164/164 [==============================] - 0s 79us/step - loss: 0.3043 - accuracy: 0.9024 - val_loss: 0.6226 - val_accuracy: 0.6727
Epoch 52/400
164/164 [==============================] - 0s 73us/step - loss: 0.3023 - accuracy: 0.8963 - val_loss: 0.6310 - val_accuracy: 0.6727
Epoch 53/400
164/164 [==============================] - 0s 79us/step - loss: 0.3005 - accuracy: 0.8963 - val_loss: 0.6380 - val_accuracy: 0.6727
Epoch 54/400
164/164 [==============================] - 0s 85us/step - loss: 0.2976 - accuracy: 0.8963 - val_loss: 0.6372 - val_accuracy: 0.6909
Epoch 55/400
164/164 [==============================] - 0s 116us/step - loss: 0.2936 - accuracy: 0.8963 - val_loss: 0.6364 - val_accuracy: 0.6909

Epoch 00055: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 56/400
164/164 [==============================] - 0s 98us/step - loss: 0.2894 - accuracy: 0.8902 - val_loss: 0.6365 - val_accuracy: 0.6727
Epoch 57/400
164/164 [==============================] - 0s 91us/step - loss: 0.2889 - accuracy: 0.8963 - val_loss: 0.6357 - val_accuracy: 0.6727
Epoch 58/400
164/164 [==============================] - 0s 104us/step - loss: 0.2874 - accuracy: 0.8963 - val_loss: 0.6389 - val_accuracy: 0.6727
Epoch 59/400
164/164 [==============================] - 0s 85us/step - loss: 0.2862 - accuracy: 0.9085 - val_loss: 0.6397 - val_accuracy: 0.6727
Epoch 60/400
164/164 [==============================] - 0s 79us/step - loss: 0.2851 - accuracy: 0.9024 - val_loss: 0.6383 - val_accuracy: 0.6727
Epoch 61/400
164/164 [==============================] - 0s 67us/step - loss: 0.2844 - accuracy: 0.9024 - val_loss: 0.6425 - val_accuracy: 0.6909
Epoch 62/400
164/164 [==============================] - 0s 73us/step - loss: 0.2831 - accuracy: 0.9146 - val_loss: 0.6449 - val_accuracy: 0.6909
Epoch 63/400
164/164 [==============================] - 0s 79us/step - loss: 0.2820 - accuracy: 0.9085 - val_loss: 0.6544 - val_accuracy: 0.6909
Epoch 64/400
164/164 [==============================] - 0s 110us/step - loss: 0.2821 - accuracy: 0.9085 - val_loss: 0.6567 - val_accuracy: 0.7091
Epoch 65/400
164/164 [==============================] - 0s 104us/step - loss: 0.2807 - accuracy: 0.8963 - val_loss: 0.6525 - val_accuracy: 0.7091

Epoch 00065: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 66/400
164/164 [==============================] - 0s 98us/step - loss: 0.2794 - accuracy: 0.8963 - val_loss: 0.6515 - val_accuracy: 0.7091
Epoch 67/400
164/164 [==============================] - 0s 73us/step - loss: 0.2789 - accuracy: 0.8963 - val_loss: 0.6506 - val_accuracy: 0.6909
Epoch 68/400
164/164 [==============================] - 0s 73us/step - loss: 0.2786 - accuracy: 0.8963 - val_loss: 0.6486 - val_accuracy: 0.6909
Epoch 69/400
164/164 [==============================] - 0s 79us/step - loss: 0.2766 - accuracy: 0.9085 - val_loss: 0.6482 - val_accuracy: 0.6909
Epoch 70/400
164/164 [==============================] - 0s 73us/step - loss: 0.2767 - accuracy: 0.9024 - val_loss: 0.6474 - val_accuracy: 0.6727
Epoch 71/400
164/164 [==============================] - 0s 97us/step - loss: 0.2767 - accuracy: 0.9024 - val_loss: 0.6477 - val_accuracy: 0.6727
Epoch 72/400
164/164 [==============================] - 0s 79us/step - loss: 0.2771 - accuracy: 0.8963 - val_loss: 0.6467 - val_accuracy: 0.6727
Epoch 73/400
164/164 [==============================] - 0s 73us/step - loss: 0.2764 - accuracy: 0.8902 - val_loss: 0.6473 - val_accuracy: 0.6727
Epoch 74/400
164/164 [==============================] - 0s 67us/step - loss: 0.2756 - accuracy: 0.8902 - val_loss: 0.6479 - val_accuracy: 0.6727
Epoch 75/400
164/164 [==============================] - 0s 67us/step - loss: 0.2745 - accuracy: 0.8963 - val_loss: 0.6484 - val_accuracy: 0.6909

Epoch 00075: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 76/400
164/164 [==============================] - 0s 67us/step - loss: 0.2737 - accuracy: 0.8963 - val_loss: 0.6479 - val_accuracy: 0.6909
Epoch 77/400
164/164 [==============================] - 0s 73us/step - loss: 0.2734 - accuracy: 0.8963 - val_loss: 0.6463 - val_accuracy: 0.6909
Epoch 78/400
164/164 [==============================] - 0s 67us/step - loss: 0.2729 - accuracy: 0.8963 - val_loss: 0.6453 - val_accuracy: 0.6909
Epoch 79/400
164/164 [==============================] - 0s 79us/step - loss: 0.2728 - accuracy: 0.9024 - val_loss: 0.6449 - val_accuracy: 0.7091
Epoch 80/400
164/164 [==============================] - 0s 79us/step - loss: 0.2723 - accuracy: 0.9024 - val_loss: 0.6455 - val_accuracy: 0.7091
Epoch 81/400
164/164 [==============================] - 0s 73us/step - loss: 0.2719 - accuracy: 0.9024 - val_loss: 0.6473 - val_accuracy: 0.7091
Epoch 82/400
164/164 [==============================] - 0s 73us/step - loss: 0.2715 - accuracy: 0.8963 - val_loss: 0.6484 - val_accuracy: 0.7091
Epoch 83/400
164/164 [==============================] - 0s 79us/step - loss: 0.2715 - accuracy: 0.8963 - val_loss: 0.6499 - val_accuracy: 0.7091
Epoch 84/400
164/164 [==============================] - 0s 110us/step - loss: 0.2711 - accuracy: 0.8963 - val_loss: 0.6493 - val_accuracy: 0.7091
Epoch 85/400
164/164 [==============================] - 0s 98us/step - loss: 0.2709 - accuracy: 0.8963 - val_loss: 0.6481 - val_accuracy: 0.6909

Epoch 00085: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 86/400
164/164 [==============================] - 0s 73us/step - loss: 0.2706 - accuracy: 0.8963 - val_loss: 0.6476 - val_accuracy: 0.6909
Epoch 87/400
164/164 [==============================] - 0s 73us/step - loss: 0.2707 - accuracy: 0.8963 - val_loss: 0.6475 - val_accuracy: 0.6909
Epoch 88/400
164/164 [==============================] - 0s 73us/step - loss: 0.2704 - accuracy: 0.8963 - val_loss: 0.6480 - val_accuracy: 0.6909
Epoch 89/400
164/164 [==============================] - 0s 73us/step - loss: 0.2703 - accuracy: 0.9024 - val_loss: 0.6482 - val_accuracy: 0.6909
Epoch 90/400
164/164 [==============================] - 0s 67us/step - loss: 0.2702 - accuracy: 0.9024 - val_loss: 0.6475 - val_accuracy: 0.6909
Epoch 91/400
164/164 [==============================] - 0s 73us/step - loss: 0.2702 - accuracy: 0.8963 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 92/400
164/164 [==============================] - 0s 73us/step - loss: 0.2700 - accuracy: 0.8963 - val_loss: 0.6475 - val_accuracy: 0.6909
Epoch 93/400
164/164 [==============================] - 0s 73us/step - loss: 0.2699 - accuracy: 0.8963 - val_loss: 0.6475 - val_accuracy: 0.6909
Epoch 94/400
164/164 [==============================] - 0s 73us/step - loss: 0.2697 - accuracy: 0.8963 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 95/400
164/164 [==============================] - 0s 67us/step - loss: 0.2696 - accuracy: 0.8963 - val_loss: 0.6469 - val_accuracy: 0.6909

Epoch 00095: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 96/400
164/164 [==============================] - 0s 67us/step - loss: 0.2693 - accuracy: 0.8963 - val_loss: 0.6466 - val_accuracy: 0.6909
Epoch 97/400
164/164 [==============================] - 0s 67us/step - loss: 0.2692 - accuracy: 0.8963 - val_loss: 0.6464 - val_accuracy: 0.6909
Epoch 98/400
164/164 [==============================] - 0s 67us/step - loss: 0.2691 - accuracy: 0.8963 - val_loss: 0.6465 - val_accuracy: 0.6909
Epoch 99/400
164/164 [==============================] - 0s 104us/step - loss: 0.2690 - accuracy: 0.8963 - val_loss: 0.6466 - val_accuracy: 0.6909
Epoch 100/400
164/164 [==============================] - 0s 73us/step - loss: 0.2689 - accuracy: 0.9024 - val_loss: 0.6468 - val_accuracy: 0.6909
Epoch 101/400
164/164 [==============================] - 0s 67us/step - loss: 0.2689 - accuracy: 0.9024 - val_loss: 0.6470 - val_accuracy: 0.6909
Epoch 102/400
164/164 [==============================] - 0s 73us/step - loss: 0.2688 - accuracy: 0.9024 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 103/400
164/164 [==============================] - 0s 79us/step - loss: 0.2687 - accuracy: 0.9024 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 104/400
164/164 [==============================] - 0s 79us/step - loss: 0.2686 - accuracy: 0.9024 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 105/400
164/164 [==============================] - 0s 61us/step - loss: 0.2686 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909

Epoch 00105: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 106/400
164/164 [==============================] - 0s 67us/step - loss: 0.2685 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 107/400
164/164 [==============================] - 0s 61us/step - loss: 0.2685 - accuracy: 0.9085 - val_loss: 0.6470 - val_accuracy: 0.6909
Epoch 108/400
164/164 [==============================] - 0s 73us/step - loss: 0.2685 - accuracy: 0.9024 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 109/400
164/164 [==============================] - 0s 67us/step - loss: 0.2684 - accuracy: 0.9024 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 110/400
164/164 [==============================] - 0s 67us/step - loss: 0.2684 - accuracy: 0.9024 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 111/400
164/164 [==============================] - 0s 73us/step - loss: 0.2684 - accuracy: 0.9024 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 112/400
164/164 [==============================] - 0s 67us/step - loss: 0.2683 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 113/400
164/164 [==============================] - 0s 73us/step - loss: 0.2683 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 114/400
164/164 [==============================] - 0s 98us/step - loss: 0.2683 - accuracy: 0.9085 - val_loss: 0.6470 - val_accuracy: 0.6909
Epoch 115/400
164/164 [==============================] - 0s 85us/step - loss: 0.2682 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909

Epoch 00115: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 116/400
164/164 [==============================] - 0s 79us/step - loss: 0.2682 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 117/400
164/164 [==============================] - 0s 73us/step - loss: 0.2681 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 118/400
164/164 [==============================] - 0s 79us/step - loss: 0.2681 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 119/400
164/164 [==============================] - 0s 67us/step - loss: 0.2681 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 120/400
164/164 [==============================] - 0s 79us/step - loss: 0.2681 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 121/400
164/164 [==============================] - 0s 79us/step - loss: 0.2681 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 122/400
164/164 [==============================] - 0s 79us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6469 - val_accuracy: 0.6909
Epoch 123/400
164/164 [==============================] - 0s 73us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6470 - val_accuracy: 0.6909
Epoch 124/400
164/164 [==============================] - 0s 67us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6470 - val_accuracy: 0.6909
Epoch 125/400
164/164 [==============================] - 0s 79us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909

Epoch 00125: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 126/400
164/164 [==============================] - 0s 73us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 127/400
164/164 [==============================] - 0s 73us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 128/400
164/164 [==============================] - 0s 67us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 129/400
164/164 [==============================] - 0s 67us/step - loss: 0.2680 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 130/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 131/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 132/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 133/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 134/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6471 - val_accuracy: 0.6909
Epoch 135/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909

Epoch 00135: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 136/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 137/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 138/400
164/164 [==============================] - 0s 79us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 139/400
164/164 [==============================] - 0s 104us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 140/400
164/164 [==============================] - 0s 98us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 141/400
164/164 [==============================] - 0s 85us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6472 - val_accuracy: 0.6909
Epoch 142/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 143/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 144/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 145/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00145: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 146/400
164/164 [==============================] - 0s 73us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 147/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 148/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 149/400
164/164 [==============================] - 0s 67us/step - loss: 0.2679 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 150/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 151/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 152/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 153/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 154/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 155/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00155: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 156/400
164/164 [==============================] - 0s 128us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 157/400
164/164 [==============================] - 0s 116us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 158/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 159/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 160/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 161/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 162/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 163/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 164/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 165/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00165: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 166/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 167/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 168/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 169/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 170/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 171/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 172/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 173/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 174/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 175/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00175: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 176/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 177/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 178/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 179/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 180/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 181/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 182/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 183/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 184/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 185/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00185: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 186/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 187/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 188/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 189/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 190/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 191/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 192/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 193/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 194/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 195/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00195: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 196/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 197/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 198/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 199/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 200/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 201/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 202/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 203/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 204/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 205/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00205: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 206/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 207/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 208/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 209/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 210/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 211/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 212/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 213/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 214/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 215/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00215: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 216/400
164/164 [==============================] - 0s 116us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 217/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 218/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 219/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 220/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 221/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 222/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 223/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 224/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 225/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00225: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 226/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 227/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 228/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 229/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 230/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 231/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 232/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 233/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 234/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 235/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00235: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 236/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 237/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 238/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 239/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 240/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 241/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 242/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 243/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 244/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 245/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00245: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 246/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 247/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 248/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 249/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 250/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 251/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 252/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 253/400
164/164 [==============================] - 0s 189us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 254/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 255/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00255: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 256/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 257/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 258/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 259/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 260/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 261/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 262/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 263/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 264/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 265/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00265: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 266/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 267/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 268/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 269/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 270/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 271/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 272/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 273/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 274/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 275/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00275: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 276/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 277/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 278/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 279/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 280/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 281/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 282/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 283/400
164/164 [==============================] - 0s 61us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 284/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 285/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00285: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 286/400
164/164 [==============================] - 0s 67us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 287/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 288/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 289/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 290/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 291/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 292/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 293/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 294/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 295/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00295: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 296/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 297/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 298/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 299/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 300/400
164/164 [==============================] - 0s 116us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 301/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 302/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 303/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 304/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 305/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00305: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 306/400
164/164 [==============================] - 0s 134us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 307/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 308/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 309/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 310/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 311/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 312/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 313/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 314/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 315/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00315: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 316/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 317/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 318/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 319/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 320/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 321/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 322/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 323/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 324/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 325/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00325: ReduceLROnPlateau reducing learning rate to 5.5879354962651284e-12.
Epoch 326/400
164/164 [==============================] - 0s 128us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 327/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 328/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 329/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 330/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 331/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 332/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 333/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 334/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 335/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00335: ReduceLROnPlateau reducing learning rate to 2.7939677481325642e-12.
Epoch 336/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 337/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 338/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 339/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 340/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 341/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 342/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 343/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 344/400
164/164 [==============================] - 0s 97us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 345/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00345: ReduceLROnPlateau reducing learning rate to 1.3969838740662821e-12.
Epoch 346/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 347/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 348/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 349/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 350/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 351/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 352/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 353/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 354/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 355/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00355: ReduceLROnPlateau reducing learning rate to 6.984919370331411e-13.
Epoch 356/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 357/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 358/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 359/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 360/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 361/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 362/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 363/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 364/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 365/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00365: ReduceLROnPlateau reducing learning rate to 3.4924596851657053e-13.
Epoch 366/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 367/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 368/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 369/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 370/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 371/400
164/164 [==============================] - 0s 110us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 372/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 373/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 374/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 375/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00375: ReduceLROnPlateau reducing learning rate to 1.7462298425828526e-13.
Epoch 376/400
164/164 [==============================] - 0s 116us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 377/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 378/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 379/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 380/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 381/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 382/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 383/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 384/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 385/400
164/164 [==============================] - 0s 73us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00385: ReduceLROnPlateau reducing learning rate to 8.731149212914263e-14.
Epoch 386/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 387/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 388/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 389/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 390/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 391/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 392/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 393/400
164/164 [==============================] - 0s 79us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 394/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 395/400
164/164 [==============================] - 0s 116us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909

Epoch 00395: ReduceLROnPlateau reducing learning rate to 4.3655746064571316e-14.
Epoch 396/400
164/164 [==============================] - 0s 104us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 397/400
164/164 [==============================] - 0s 85us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 398/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 399/400
164/164 [==============================] - 0s 91us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
Epoch 400/400
164/164 [==============================] - 0s 98us/step - loss: 0.2678 - accuracy: 0.9085 - val_loss: 0.6473 - val_accuracy: 0.6909
In [325]:
# Plot the Keras training history: accuracy and loss curves for the
# training vs. validation split (removed leftover debug print of `epochs`,
# added axis labels so the figures stand alone).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 400)
In [326]:
# Evaluate the trained network on the held-out test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
55/55 [==============================] - 0s 55us/step
test loss: 0.6473115801811218, test accuracy: 0.6909090876579285
In [327]:
# Predicted probabilities for the positive class; ROC AUC is computed on the
# raw probabilities (threshold-independent).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.753968253968254
In [328]:
# Binarize the predicted probabilities at the 0.5 threshold, then score
# chance-corrected agreement with Cohen's kappa.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.3820224719101123

KMeans

In [329]:
# Inspect the standardized tonal-centroid feature matrix (219 rows x 6 cols).
X
Out[329]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 -1.035481 1.779354 1.874576 0.924814 -0.129662 2.608421
1 0.965487 -0.399971 -1.606069 0.008311 0.834341 0.513694
2 -0.141249 -1.969933 -0.960470 1.005123 -1.117123 -2.399517
3 -1.590590 -0.729741 -0.575342 0.587988 -0.885561 -0.752828
4 -0.391524 -0.894181 -0.426309 1.017585 -0.391173 -0.920259
5 -1.256622 -0.886861 -0.850243 0.516749 -0.491454 -0.072867
6 -1.579202 0.121365 -0.522749 1.012025 -0.547676 -0.140430
7 -1.760350 -0.182429 -0.008789 1.576085 -0.878841 0.252104
8 1.115526 1.555384 0.609404 0.558809 -0.514428 -0.221726
9 1.467291 1.402697 0.806896 -0.279535 0.939735 0.758333
10 0.972379 1.550575 -0.223468 0.899199 1.412818 0.386724
11 0.294385 0.890870 0.493531 0.142145 0.212432 1.463886
12 0.795134 0.176458 1.588747 -0.412034 -0.982878 -0.299581
13 0.694481 0.577820 0.319393 0.451356 0.219257 0.563199
14 1.169114 0.075245 -0.980006 1.330732 2.094068 1.785970
15 0.962642 0.380225 -1.261850 1.044019 1.339949 1.776870
16 1.352245 0.463507 -0.679184 0.941519 2.196602 1.991369
17 1.784002 -1.453636 -1.128885 -0.626496 0.399672 -0.605861
18 0.929212 -0.538274 -1.016394 -0.167176 0.557933 1.511009
19 1.199761 -0.727252 0.322239 -1.105069 -0.125311 -0.979170
20 -0.485056 0.796900 0.581966 1.884586 -0.705890 -0.725300
21 -0.547233 0.692440 -0.162284 2.025268 -0.631876 -0.337240
22 1.446103 -0.074850 -0.132752 -0.064117 -0.209506 -0.465551
23 -0.312063 0.030270 -1.160963 0.726155 -1.511552 -0.509175
24 1.175126 -0.143713 -0.522479 0.641015 0.500311 0.617748
25 -1.044292 -0.058933 -1.340279 -1.302246 1.751828 -0.815403
26 -0.849044 0.079838 -0.400536 -1.312330 1.498217 -0.550869
27 -0.730672 -0.326196 -0.478608 -0.832610 -0.556236 -0.653280
28 -0.380922 -0.892886 -0.555313 -0.113628 1.211258 -0.901155
29 -0.368302 -1.168844 -0.094765 -0.158075 1.016584 -1.274561
... ... ... ... ... ... ...
189 -1.023243 0.827082 0.695531 0.482823 -0.093190 -0.130945
190 1.643548 -0.570770 0.545333 -0.137189 0.295910 -0.891672
191 1.543182 -0.533850 0.979103 0.227528 0.216491 -0.016099
192 1.416929 -1.770555 0.592692 -1.546796 -0.112419 -0.017441
193 -1.336444 0.162214 -1.528887 1.340066 0.343647 -0.060973
194 -0.331197 -0.545328 0.449891 -2.242097 0.210220 1.299600
195 -0.991382 -0.378373 -0.215170 -2.818431 1.156878 -0.599042
196 0.827092 0.502299 0.219306 1.474834 0.577530 0.832676
197 0.976291 0.325663 -0.091820 0.723604 0.494609 0.610596
198 0.903378 0.857383 0.090549 0.948012 1.127442 0.927032
199 -1.135922 -0.217483 -0.201444 0.204262 -0.033230 -0.725561
200 -1.143077 -0.289624 -0.109440 0.093244 0.007101 -0.571608
201 -1.325584 -0.109383 -0.850284 -0.442939 0.518129 -0.996845
202 0.270878 1.568003 -0.899682 0.187348 -0.995623 0.436835
203 -0.010376 1.403657 -0.298654 0.126520 -0.803249 -0.284875
204 -0.149606 0.679408 -0.527828 0.145473 0.226461 0.232361
205 -1.281900 0.472582 2.041397 -0.186464 1.140780 -0.694445
206 -1.561361 0.699591 0.373931 0.512801 0.245563 -1.259098
207 -0.548022 0.646014 -0.015758 -0.364427 1.106060 -0.395692
208 -0.689835 0.729721 0.242422 0.167324 -0.269920 0.625568
209 -1.182263 0.898528 0.655331 1.146978 -0.973699 0.509883
210 -0.465862 0.576977 -0.088421 1.290934 0.648005 0.669298
211 -0.265321 1.252143 0.230904 0.383047 -0.920749 0.237760
212 0.205358 1.300786 0.929349 -0.432002 -0.464366 -0.242135
213 -0.025600 0.467818 0.261063 -1.437444 -0.391460 -0.995280
214 -1.082557 1.025513 2.276661 1.056731 0.361540 1.291351
215 -1.297371 1.948703 2.264684 1.377703 1.194669 1.983124
216 -0.926424 0.162164 1.016687 1.945841 -1.341651 0.150826
217 -1.375041 -0.362757 -0.599873 1.478900 -0.021584 -0.846072
218 -0.974264 0.740461 0.889462 0.014997 1.024334 -0.992000

219 rows × 6 columns

In [330]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
# `fit` returns the estimator, so the whole sweep collapses to a comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[330]:
[1314.0,
 1103.6617898421102,
 933.9046374976435,
 830.0952355796812,
 752.8157274494505,
 696.8283563577859,
 641.355058887789,
 599.6834692450786,
 558.0899857646746,
 538.6016435622136,
 502.540180641064,
 477.03865333096127,
 457.1745404655215,
 443.08717934712786]
In [331]:
# Plot the elbow curve; the bend is used to pick the cluster count (K=3 below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[331]:
[<matplotlib.lines.Line2D at 0x1e831ea44e0>]

K=3

In [332]:
# Final K-Means model with the K=3 chosen from the elbow plot; fixed
# random_state keeps the clustering reproducible.
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[332]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [333]:
# Cluster assignment of each fitted row.
kmeans_tc.labels_
Out[333]:
array([2, 2, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2,
       0, 2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       2, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 2,
       0, 0, 0, 0, 1, 0, 2, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 2, 0, 2, 0,
       2, 1, 1, 2, 2, 1, 2, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 1, 1, 0,
       2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2,
       2, 0, 0, 0, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
       0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 1, 1, 1, 1, 1, 1, 2, 1,
       1, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2,
       2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1])
In [334]:
# NOTE(review): predicting on the same X used for fitting simply reproduces
# kmeans_tc.labels_ (compare Out[333] and Out[334]) — kept for clarity only.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[334]:
array([2, 2, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2,
       0, 2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       2, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 2,
       0, 0, 0, 0, 1, 0, 2, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 2, 0, 2, 0,
       2, 1, 1, 2, 2, 1, 2, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 1, 1, 0,
       2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2,
       2, 0, 0, 0, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
       0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 1, 1, 1, 1, 1, 1, 2, 1,
       1, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2,
       2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1])
In [335]:
# Append the cluster id and the target label to X for the cross-tab below.
# NOTE(review): this mutates X in place — re-running earlier cells (e.g. the
# KMeans fit) after this point would see the widened 8-column frame.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [336]:
# X now carries the extra 'Cluster' and 'chosen' columns (219 rows x 8 cols).
X
Out[336]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 -1.035481 1.779354 1.874576 0.924814 -0.129662 2.608421 2 0
1 0.965487 -0.399971 -1.606069 0.008311 0.834341 0.513694 2 0
2 -0.141249 -1.969933 -0.960470 1.005123 -1.117123 -2.399517 0 0
3 -1.590590 -0.729741 -0.575342 0.587988 -0.885561 -0.752828 1 0
4 -0.391524 -0.894181 -0.426309 1.017585 -0.391173 -0.920259 0 0
5 -1.256622 -0.886861 -0.850243 0.516749 -0.491454 -0.072867 1 0
6 -1.579202 0.121365 -0.522749 1.012025 -0.547676 -0.140430 1 0
7 -1.760350 -0.182429 -0.008789 1.576085 -0.878841 0.252104 2 0
8 1.115526 1.555384 0.609404 0.558809 -0.514428 -0.221726 2 0
9 1.467291 1.402697 0.806896 -0.279535 0.939735 0.758333 2 0
10 0.972379 1.550575 -0.223468 0.899199 1.412818 0.386724 2 0
11 0.294385 0.890870 0.493531 0.142145 0.212432 1.463886 2 0
12 0.795134 0.176458 1.588747 -0.412034 -0.982878 -0.299581 0 0
13 0.694481 0.577820 0.319393 0.451356 0.219257 0.563199 2 0
14 1.169114 0.075245 -0.980006 1.330732 2.094068 1.785970 2 0
15 0.962642 0.380225 -1.261850 1.044019 1.339949 1.776870 2 0
16 1.352245 0.463507 -0.679184 0.941519 2.196602 1.991369 2 0
17 1.784002 -1.453636 -1.128885 -0.626496 0.399672 -0.605861 0 0
18 0.929212 -0.538274 -1.016394 -0.167176 0.557933 1.511009 2 0
19 1.199761 -0.727252 0.322239 -1.105069 -0.125311 -0.979170 0 0
20 -0.485056 0.796900 0.581966 1.884586 -0.705890 -0.725300 2 0
21 -0.547233 0.692440 -0.162284 2.025268 -0.631876 -0.337240 2 0
22 1.446103 -0.074850 -0.132752 -0.064117 -0.209506 -0.465551 0 0
23 -0.312063 0.030270 -1.160963 0.726155 -1.511552 -0.509175 2 0
24 1.175126 -0.143713 -0.522479 0.641015 0.500311 0.617748 2 0
25 -1.044292 -0.058933 -1.340279 -1.302246 1.751828 -0.815403 1 0
26 -0.849044 0.079838 -0.400536 -1.312330 1.498217 -0.550869 1 0
27 -0.730672 -0.326196 -0.478608 -0.832610 -0.556236 -0.653280 1 0
28 -0.380922 -0.892886 -0.555313 -0.113628 1.211258 -0.901155 1 0
29 -0.368302 -1.168844 -0.094765 -0.158075 1.016584 -1.274561 1 0
... ... ... ... ... ... ... ... ...
189 -1.023243 0.827082 0.695531 0.482823 -0.093190 -0.130945 2 1
190 1.643548 -0.570770 0.545333 -0.137189 0.295910 -0.891672 0 1
191 1.543182 -0.533850 0.979103 0.227528 0.216491 -0.016099 0 1
192 1.416929 -1.770555 0.592692 -1.546796 -0.112419 -0.017441 0 1
193 -1.336444 0.162214 -1.528887 1.340066 0.343647 -0.060973 1 1
194 -0.331197 -0.545328 0.449891 -2.242097 0.210220 1.299600 0 1
195 -0.991382 -0.378373 -0.215170 -2.818431 1.156878 -0.599042 1 1
196 0.827092 0.502299 0.219306 1.474834 0.577530 0.832676 2 1
197 0.976291 0.325663 -0.091820 0.723604 0.494609 0.610596 2 1
198 0.903378 0.857383 0.090549 0.948012 1.127442 0.927032 2 1
199 -1.135922 -0.217483 -0.201444 0.204262 -0.033230 -0.725561 1 1
200 -1.143077 -0.289624 -0.109440 0.093244 0.007101 -0.571608 1 1
201 -1.325584 -0.109383 -0.850284 -0.442939 0.518129 -0.996845 1 1
202 0.270878 1.568003 -0.899682 0.187348 -0.995623 0.436835 2 1
203 -0.010376 1.403657 -0.298654 0.126520 -0.803249 -0.284875 2 1
204 -0.149606 0.679408 -0.527828 0.145473 0.226461 0.232361 2 1
205 -1.281900 0.472582 2.041397 -0.186464 1.140780 -0.694445 1 1
206 -1.561361 0.699591 0.373931 0.512801 0.245563 -1.259098 1 1
207 -0.548022 0.646014 -0.015758 -0.364427 1.106060 -0.395692 1 1
208 -0.689835 0.729721 0.242422 0.167324 -0.269920 0.625568 2 1
209 -1.182263 0.898528 0.655331 1.146978 -0.973699 0.509883 2 1
210 -0.465862 0.576977 -0.088421 1.290934 0.648005 0.669298 2 1
211 -0.265321 1.252143 0.230904 0.383047 -0.920749 0.237760 2 1
212 0.205358 1.300786 0.929349 -0.432002 -0.464366 -0.242135 2 1
213 -0.025600 0.467818 0.261063 -1.437444 -0.391460 -0.995280 1 1
214 -1.082557 1.025513 2.276661 1.056731 0.361540 1.291351 2 1
215 -1.297371 1.948703 2.264684 1.377703 1.194669 1.983124 2 1
216 -0.926424 0.162164 1.016687 1.945841 -1.341651 0.150826 2 1
217 -1.375041 -0.362757 -0.599873 1.478900 -0.021584 -0.846072 1 1
218 -0.974264 0.740461 0.889462 0.014997 1.024334 -0.992000 1 1

219 rows × 8 columns

In [337]:
# Stacked bar chart: size of each cluster broken down by the 'chosen' label,
# to see whether clusters align with the chosen/not-chosen split.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[337]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e832243f98>
In [338]:
# Render a markdown section header for the sixth company (index 5).
# NOTE(review): mid-notebook import — ideally moved to the top imports cell.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [339]:
# Standardized tonal-centroid features for company index 5 ("Urban Place").
X = df_n_ps_std_tc[5]
In [340]:
# Binary target: whether the song was chosen for the playlist.
y = df_n_ps[5]['chosen']
In [341]:
# Hold out a test set (sklearn default: 25%).
# random_state pinned so the split — and every result derived from it
# below — is reproducible; the original call was unseeded.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [342]:
# Sanity check: 168 training rows x 6 features (57 rows held out for test).
X_train.shape
Out[342]:
(168, 6)
In [343]:
# Base estimator for the grid search; hidden_layer_sizes set here is only a
# placeholder — the search grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [344]:
# Candidate hyper-parameter grids for the MLP search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the search below (commented out there, for cost).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [345]:
import time
start = time.time() # current time in seconds since Jan 1 1970 (epoch reference)

np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# 5-fold CV, refit on accuracy; kappa is tracked as a secondary metric.
# NOTE(review): the `iid` parameter was removed in scikit-learn 0.24 —
# drop it when upgrading.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [346]:
# Run the exhaustive search (slow — ~29 minutes) and report the winning
# configuration with its CV accuracy and kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time right after the model search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20, 10), 'learning_rate_init': 0.005, 'max_iter': 200}, que permiten obtener un Accuracy de 70.83% y un Kappa del 35.52
Tiempo total: 28.81 minutos
In [347]:
# Translate the winning sklearn configuration into the Keras network spec:
# input width, layer widths (hidden sizes plus a 1-unit output), learning
# rate and epoch budget.
n0 = X_train.shape[1]
best = grid.best_params_
ns = [*best['hidden_layer_sizes'], 1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [348]:
# Keras functional-API input layer matching the feature count.
input_tensor = Input(shape = (n0,))
In [349]:
# Chain the tanh hidden layers in order (widths ns[:-1]), keeping every
# intermediate tensor, then attach the 1-unit sigmoid output for binary
# classification.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation='tanh')(hidden_outputs[-1]))

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [350]:
# Build the model and stash its freshly initialized weights; they are
# restored just before training below (In [352]) via set_weights.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [351]:
# Architecture check: 6 -> 20 -> 10 -> 1, 361 trainable parameters.
model.summary()
Model: "model_13"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_13 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_38 (Dense)             (None, 20)                140       
_________________________________________________________________
dense_39 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_40 (Dense)             (None, 1)                 11        
=================================================================
Total params: 361
Trainable params: 361
Non-trainable params: 0
_________________________________________________________________
In [352]:
# Restore the saved initial weights (ensures training starts from the exact
# initialization captured in In [350]), compile with the grid-selected
# learning rate, and train for the grid-selected number of epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# ReduceLROnPlateau halves the learning rate whenever validation accuracy
# fails to improve by min_delta for 10 consecutive epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 168 samples, validate on 57 samples
Epoch 1/200
168/168 [==============================] - 0s 1ms/step - loss: 0.7224 - accuracy: 0.4821 - val_loss: 0.6746 - val_accuracy: 0.6140
Epoch 2/200
168/168 [==============================] - 0s 71us/step - loss: 0.6802 - accuracy: 0.6250 - val_loss: 0.6477 - val_accuracy: 0.7193
Epoch 3/200
168/168 [==============================] - 0s 89us/step - loss: 0.6596 - accuracy: 0.6429 - val_loss: 0.6214 - val_accuracy: 0.7719
Epoch 4/200
168/168 [==============================] - 0s 83us/step - loss: 0.6451 - accuracy: 0.6488 - val_loss: 0.6017 - val_accuracy: 0.7368
Epoch 5/200
168/168 [==============================] - 0s 83us/step - loss: 0.6335 - accuracy: 0.6310 - val_loss: 0.5810 - val_accuracy: 0.7544
Epoch 6/200
168/168 [==============================] - 0s 77us/step - loss: 0.6221 - accuracy: 0.6250 - val_loss: 0.5623 - val_accuracy: 0.7544
Epoch 7/200
168/168 [==============================] - 0s 65us/step - loss: 0.6118 - accuracy: 0.6548 - val_loss: 0.5455 - val_accuracy: 0.7544
Epoch 8/200
168/168 [==============================] - 0s 59us/step - loss: 0.6036 - accuracy: 0.6429 - val_loss: 0.5329 - val_accuracy: 0.7719
Epoch 9/200
168/168 [==============================] - 0s 83us/step - loss: 0.5973 - accuracy: 0.6905 - val_loss: 0.5210 - val_accuracy: 0.8070
Epoch 10/200
168/168 [==============================] - 0s 71us/step - loss: 0.5917 - accuracy: 0.6964 - val_loss: 0.5129 - val_accuracy: 0.8246
Epoch 11/200
168/168 [==============================] - 0s 65us/step - loss: 0.5858 - accuracy: 0.7143 - val_loss: 0.5090 - val_accuracy: 0.8246
Epoch 12/200
168/168 [==============================] - 0s 65us/step - loss: 0.5788 - accuracy: 0.7143 - val_loss: 0.5051 - val_accuracy: 0.8070
Epoch 13/200
168/168 [==============================] - 0s 65us/step - loss: 0.5718 - accuracy: 0.7024 - val_loss: 0.4995 - val_accuracy: 0.7544
Epoch 14/200
168/168 [==============================] - 0s 71us/step - loss: 0.5659 - accuracy: 0.6964 - val_loss: 0.4958 - val_accuracy: 0.7544
Epoch 15/200
168/168 [==============================] - 0s 65us/step - loss: 0.5612 - accuracy: 0.6964 - val_loss: 0.4841 - val_accuracy: 0.7719
Epoch 16/200
168/168 [==============================] - 0s 59us/step - loss: 0.5559 - accuracy: 0.7202 - val_loss: 0.4795 - val_accuracy: 0.8070
Epoch 17/200
168/168 [==============================] - 0s 65us/step - loss: 0.5512 - accuracy: 0.7143 - val_loss: 0.4756 - val_accuracy: 0.8070
Epoch 18/200
168/168 [==============================] - 0s 77us/step - loss: 0.5481 - accuracy: 0.6964 - val_loss: 0.4738 - val_accuracy: 0.7895
Epoch 19/200
168/168 [==============================] - 0s 59us/step - loss: 0.5437 - accuracy: 0.7024 - val_loss: 0.4723 - val_accuracy: 0.7895
Epoch 20/200
168/168 [==============================] - 0s 71us/step - loss: 0.5384 - accuracy: 0.7083 - val_loss: 0.4718 - val_accuracy: 0.7719

Epoch 00020: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 21/200
168/168 [==============================] - 0s 65us/step - loss: 0.5344 - accuracy: 0.7143 - val_loss: 0.4726 - val_accuracy: 0.7719
Epoch 22/200
168/168 [==============================] - 0s 65us/step - loss: 0.5323 - accuracy: 0.7202 - val_loss: 0.4735 - val_accuracy: 0.7719
Epoch 23/200
168/168 [==============================] - 0s 65us/step - loss: 0.5311 - accuracy: 0.7202 - val_loss: 0.4731 - val_accuracy: 0.7719
Epoch 24/200
168/168 [==============================] - 0s 65us/step - loss: 0.5295 - accuracy: 0.7321 - val_loss: 0.4720 - val_accuracy: 0.7719
Epoch 25/200
168/168 [==============================] - 0s 71us/step - loss: 0.5266 - accuracy: 0.7381 - val_loss: 0.4696 - val_accuracy: 0.7719
Epoch 26/200
168/168 [==============================] - 0s 71us/step - loss: 0.5252 - accuracy: 0.7381 - val_loss: 0.4695 - val_accuracy: 0.7719
Epoch 27/200
168/168 [==============================] - 0s 65us/step - loss: 0.5230 - accuracy: 0.7381 - val_loss: 0.4675 - val_accuracy: 0.7719
Epoch 28/200
168/168 [==============================] - 0s 71us/step - loss: 0.5213 - accuracy: 0.7381 - val_loss: 0.4670 - val_accuracy: 0.7719
Epoch 29/200
168/168 [==============================] - 0s 113us/step - loss: 0.5193 - accuracy: 0.7500 - val_loss: 0.4675 - val_accuracy: 0.7895
Epoch 30/200
168/168 [==============================] - 0s 65us/step - loss: 0.5179 - accuracy: 0.7560 - val_loss: 0.4647 - val_accuracy: 0.7895

Epoch 00030: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 31/200
168/168 [==============================] - 0s 59us/step - loss: 0.5159 - accuracy: 0.7560 - val_loss: 0.4624 - val_accuracy: 0.7895
Epoch 32/200
168/168 [==============================] - 0s 71us/step - loss: 0.5153 - accuracy: 0.7560 - val_loss: 0.4600 - val_accuracy: 0.7895
Epoch 33/200
168/168 [==============================] - 0s 65us/step - loss: 0.5144 - accuracy: 0.7500 - val_loss: 0.4584 - val_accuracy: 0.7895
Epoch 34/200
168/168 [==============================] - 0s 65us/step - loss: 0.5135 - accuracy: 0.7500 - val_loss: 0.4581 - val_accuracy: 0.7895
Epoch 35/200
168/168 [==============================] - 0s 59us/step - loss: 0.5129 - accuracy: 0.7619 - val_loss: 0.4576 - val_accuracy: 0.7895
Epoch 36/200
168/168 [==============================] - 0s 71us/step - loss: 0.5117 - accuracy: 0.7619 - val_loss: 0.4584 - val_accuracy: 0.7895
Epoch 37/200
168/168 [==============================] - 0s 65us/step - loss: 0.5107 - accuracy: 0.7619 - val_loss: 0.4578 - val_accuracy: 0.7895
Epoch 38/200
168/168 [==============================] - 0s 65us/step - loss: 0.5101 - accuracy: 0.7619 - val_loss: 0.4578 - val_accuracy: 0.7895
Epoch 39/200
168/168 [==============================] - 0s 59us/step - loss: 0.5089 - accuracy: 0.7619 - val_loss: 0.4571 - val_accuracy: 0.7895
Epoch 40/200
168/168 [==============================] - 0s 65us/step - loss: 0.5077 - accuracy: 0.7619 - val_loss: 0.4559 - val_accuracy: 0.7895

Epoch 00040: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 41/200
168/168 [==============================] - 0s 71us/step - loss: 0.5068 - accuracy: 0.7679 - val_loss: 0.4555 - val_accuracy: 0.7895
Epoch 42/200
168/168 [==============================] - 0s 71us/step - loss: 0.5065 - accuracy: 0.7679 - val_loss: 0.4551 - val_accuracy: 0.7895
Epoch 43/200
168/168 [==============================] - 0s 65us/step - loss: 0.5060 - accuracy: 0.7679 - val_loss: 0.4549 - val_accuracy: 0.7895
Epoch 44/200
168/168 [==============================] - 0s 65us/step - loss: 0.5056 - accuracy: 0.7619 - val_loss: 0.4547 - val_accuracy: 0.7895
Epoch 45/200
168/168 [==============================] - 0s 65us/step - loss: 0.5053 - accuracy: 0.7679 - val_loss: 0.4547 - val_accuracy: 0.7895
Epoch 46/200
168/168 [==============================] - 0s 59us/step - loss: 0.5051 - accuracy: 0.7619 - val_loss: 0.4550 - val_accuracy: 0.7895
Epoch 47/200
168/168 [==============================] - 0s 65us/step - loss: 0.5047 - accuracy: 0.7619 - val_loss: 0.4545 - val_accuracy: 0.7895
Epoch 48/200
168/168 [==============================] - 0s 65us/step - loss: 0.5043 - accuracy: 0.7619 - val_loss: 0.4538 - val_accuracy: 0.7895
Epoch 49/200
168/168 [==============================] - 0s 65us/step - loss: 0.5038 - accuracy: 0.7560 - val_loss: 0.4532 - val_accuracy: 0.7895
Epoch 50/200
168/168 [==============================] - 0s 65us/step - loss: 0.5032 - accuracy: 0.7619 - val_loss: 0.4530 - val_accuracy: 0.7895

Epoch 00050: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 51/200
168/168 [==============================] - 0s 59us/step - loss: 0.5028 - accuracy: 0.7679 - val_loss: 0.4529 - val_accuracy: 0.7895
Epoch 52/200
168/168 [==============================] - 0s 59us/step - loss: 0.5026 - accuracy: 0.7679 - val_loss: 0.4528 - val_accuracy: 0.7895
Epoch 53/200
168/168 [==============================] - 0s 71us/step - loss: 0.5023 - accuracy: 0.7679 - val_loss: 0.4528 - val_accuracy: 0.7895
Epoch 54/200
168/168 [==============================] - 0s 83us/step - loss: 0.5022 - accuracy: 0.7679 - val_loss: 0.4529 - val_accuracy: 0.7895
Epoch 55/200
168/168 [==============================] - 0s 101us/step - loss: 0.5019 - accuracy: 0.7619 - val_loss: 0.4531 - val_accuracy: 0.7895
Epoch 56/200
168/168 [==============================] - 0s 71us/step - loss: 0.5016 - accuracy: 0.7619 - val_loss: 0.4532 - val_accuracy: 0.7895
Epoch 57/200
168/168 [==============================] - 0s 65us/step - loss: 0.5015 - accuracy: 0.7619 - val_loss: 0.4533 - val_accuracy: 0.7895
Epoch 58/200
168/168 [==============================] - 0s 59us/step - loss: 0.5011 - accuracy: 0.7619 - val_loss: 0.4532 - val_accuracy: 0.7895
Epoch 59/200
168/168 [==============================] - 0s 65us/step - loss: 0.5010 - accuracy: 0.7619 - val_loss: 0.4530 - val_accuracy: 0.7895
Epoch 60/200
168/168 [==============================] - 0s 65us/step - loss: 0.5010 - accuracy: 0.7560 - val_loss: 0.4525 - val_accuracy: 0.7895

Epoch 00060: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 61/200
168/168 [==============================] - 0s 59us/step - loss: 0.5006 - accuracy: 0.7560 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 62/200
168/168 [==============================] - 0s 65us/step - loss: 0.5005 - accuracy: 0.7560 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 63/200
168/168 [==============================] - 0s 65us/step - loss: 0.5004 - accuracy: 0.7560 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 64/200
168/168 [==============================] - 0s 65us/step - loss: 0.5003 - accuracy: 0.7619 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 65/200
168/168 [==============================] - 0s 59us/step - loss: 0.5001 - accuracy: 0.7619 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 66/200
168/168 [==============================] - 0s 65us/step - loss: 0.5001 - accuracy: 0.7679 - val_loss: 0.4524 - val_accuracy: 0.7895
Epoch 67/200
168/168 [==============================] - 0s 65us/step - loss: 0.4999 - accuracy: 0.7679 - val_loss: 0.4525 - val_accuracy: 0.7895
Epoch 68/200
168/168 [==============================] - 0s 71us/step - loss: 0.4998 - accuracy: 0.7679 - val_loss: 0.4527 - val_accuracy: 0.7895
Epoch 69/200
168/168 [==============================] - 0s 65us/step - loss: 0.4996 - accuracy: 0.7679 - val_loss: 0.4527 - val_accuracy: 0.7895
Epoch 70/200
168/168 [==============================] - 0s 65us/step - loss: 0.4995 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895

Epoch 00070: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 71/200
168/168 [==============================] - 0s 59us/step - loss: 0.4994 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895
Epoch 72/200
168/168 [==============================] - 0s 59us/step - loss: 0.4994 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895
Epoch 73/200
168/168 [==============================] - 0s 65us/step - loss: 0.4994 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895
Epoch 74/200
168/168 [==============================] - 0s 59us/step - loss: 0.4993 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895
Epoch 75/200
168/168 [==============================] - 0s 65us/step - loss: 0.4992 - accuracy: 0.7679 - val_loss: 0.4526 - val_accuracy: 0.7895
Epoch 76/200
168/168 [==============================] - 0s 59us/step - loss: 0.4992 - accuracy: 0.7679 - val_loss: 0.4525 - val_accuracy: 0.7895
Epoch 77/200
168/168 [==============================] - 0s 59us/step - loss: 0.4992 - accuracy: 0.7679 - val_loss: 0.4524 - val_accuracy: 0.7895
Epoch 78/200
168/168 [==============================] - 0s 71us/step - loss: 0.4991 - accuracy: 0.7679 - val_loss: 0.4524 - val_accuracy: 0.7895
Epoch 79/200
168/168 [==============================] - 0s 65us/step - loss: 0.4991 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 80/200
168/168 [==============================] - 0s 65us/step - loss: 0.4990 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895

Epoch 00080: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 81/200
168/168 [==============================] - 0s 107us/step - loss: 0.4990 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 82/200
168/168 [==============================] - 0s 95us/step - loss: 0.4989 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 83/200
168/168 [==============================] - 0s 107us/step - loss: 0.4989 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 84/200
168/168 [==============================] - 0s 83us/step - loss: 0.4989 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 85/200
168/168 [==============================] - 0s 65us/step - loss: 0.4989 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 86/200
168/168 [==============================] - 0s 65us/step - loss: 0.4988 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 87/200
168/168 [==============================] - 0s 65us/step - loss: 0.4988 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 88/200
168/168 [==============================] - 0s 77us/step - loss: 0.4988 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 89/200
168/168 [==============================] - 0s 71us/step - loss: 0.4988 - accuracy: 0.7679 - val_loss: 0.4523 - val_accuracy: 0.7895
Epoch 90/200
168/168 [==============================] - 0s 60us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895

Epoch 00090: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 91/200
168/168 [==============================] - 0s 59us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 92/200
168/168 [==============================] - 0s 71us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 93/200
168/168 [==============================] - 0s 65us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 94/200
168/168 [==============================] - 0s 65us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 95/200
168/168 [==============================] - 0s 95us/step - loss: 0.4987 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 96/200
168/168 [==============================] - 0s 71us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 97/200
168/168 [==============================] - 0s 65us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 98/200
168/168 [==============================] - 0s 65us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 99/200
168/168 [==============================] - 0s 65us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 100/200
168/168 [==============================] - 0s 65us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895

Epoch 00100: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 101/200
168/168 [==============================] - 0s 59us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 102/200
168/168 [==============================] - 0s 71us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 103/200
168/168 [==============================] - 0s 59us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 104/200
168/168 [==============================] - 0s 59us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 105/200
168/168 [==============================] - 0s 65us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 106/200
168/168 [==============================] - 0s 59us/step - loss: 0.4986 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 107/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 108/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 109/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 110/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895

Epoch 00110: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 111/200
168/168 [==============================] - 0s 95us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 112/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 113/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 114/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 115/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 116/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 117/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 118/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 119/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 120/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895

Epoch 00120: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 121/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 122/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 123/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 124/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 125/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 126/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 127/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 128/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 129/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4522 - val_accuracy: 0.7895
Epoch 130/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00130: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 131/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 132/200
168/168 [==============================] - 0s 83us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 133/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 134/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 135/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 136/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 137/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 138/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 139/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 140/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00140: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 141/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 142/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 143/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 144/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 145/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 146/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 147/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 148/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 149/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 150/200
168/168 [==============================] - 0s 101us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00150: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 151/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 152/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 153/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 154/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 155/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 156/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 157/200
168/168 [==============================] - 0s 83us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 158/200
168/168 [==============================] - 0s 83us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 159/200
168/168 [==============================] - 0s 107us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 160/200
168/168 [==============================] - 0s 89us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00160: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 161/200
168/168 [==============================] - 0s 89us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 162/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 163/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 164/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 165/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 166/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 167/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 168/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 169/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 170/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00170: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 171/200
168/168 [==============================] - 0s 107us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 172/200
168/168 [==============================] - 0s 83us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 173/200
168/168 [==============================] - 0s 89us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 174/200
168/168 [==============================] - 0s 89us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 175/200
168/168 [==============================] - 0s 89us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 176/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 177/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 178/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 179/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 180/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00180: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 181/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 182/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 183/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 184/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 185/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 186/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 187/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 188/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 189/200
168/168 [==============================] - 0s 71us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 190/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00190: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 191/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 192/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 193/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 194/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 195/200
168/168 [==============================] - 0s 83us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 196/200
168/168 [==============================] - 0s 77us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 197/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 198/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 199/200
168/168 [==============================] - 0s 65us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895
Epoch 200/200
168/168 [==============================] - 0s 59us/step - loss: 0.4985 - accuracy: 0.7679 - val_loss: 0.4521 - val_accuracy: 0.7895

Epoch 00200: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
In [353]:
# Pull the per-epoch training curves recorded by Keras during model.fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per recorded epoch (0-based, matching the history lists).
epochs = range(len(acc))

# Accuracy: training (dots) vs. validation (solid line).
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss: training (dots) vs. validation (solid line).
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 200)
In [354]:
# Evaluate the trained Keras model on the held-out test set.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
57/57 [==============================] - 0s 53us/step
test loss: 0.4521386628611046, test accuracy: 0.7894737124443054
In [355]:
# Predicted positive-class probabilities for the test set; used directly
# as scores for ROC AUC (roc_auc_score takes scores, not hard labels).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7481481481481481
In [356]:
# Binarize the predicted probabilities at the conventional 0.5 threshold.
# Vectorized with numpy instead of mapping int() over size-1 arrays
# (converting a size-1 ndarray with int() is deprecated in NumPy and slow).
y_pred = (np.asarray(y_pred) >= 0.5).astype(int).ravel().tolist()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.22448979591836726

KMeans

In [357]:
X
Out[357]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.221298 -0.131733 -0.299437 -1.504299 -0.981450 0.118501
1 -0.401407 -1.423353 0.879221 0.761177 -0.141123 -1.312558
2 -0.336789 -0.749510 0.197427 0.350093 -0.148137 0.191187
3 -0.785803 0.323072 0.152856 -0.630916 1.431384 -1.260729
4 -0.879329 0.391910 0.348949 -0.626137 1.401315 -0.417223
5 -1.122300 0.270282 0.525297 1.043848 -0.500903 0.121388
6 1.854801 -0.110375 0.714529 -0.774181 0.550772 -0.030796
7 1.233933 0.235407 0.220324 -0.416248 0.185500 0.429491
8 0.870319 0.678474 0.499988 1.108991 1.281285 0.734961
9 0.390986 0.860360 -1.381945 0.173292 -0.891547 -0.692921
10 0.746873 1.087314 0.138873 0.673665 0.999100 1.072527
11 0.794479 0.967314 -0.363499 1.122351 0.135802 -0.719195
12 -1.413209 -0.136961 -0.371584 0.036112 -0.471990 -1.250545
13 -0.072759 -1.149988 1.952271 0.500676 0.052506 -1.006895
14 -1.273932 -0.022476 -0.393398 0.422307 -1.130763 -0.797664
15 -1.343760 -0.913734 0.077520 -0.431201 -1.914695 0.581853
16 -0.984733 -0.428093 1.220123 -0.685684 1.139966 0.712742
17 0.280303 1.062737 -0.783356 -0.172690 -0.675275 0.261145
18 0.000660 1.240654 -0.919850 -0.619755 -0.483109 -0.722220
19 0.174949 1.113514 -2.625056 -0.681779 -0.763682 0.031420
20 -0.183363 -1.557702 -0.979966 0.553109 1.161043 -0.644240
21 -0.171420 -1.728398 -0.653886 -0.361851 0.584439 -0.772678
22 -0.120605 -1.190295 1.627818 0.127251 -0.575228 0.475040
23 -1.074141 1.086592 0.969446 0.102117 0.044168 1.096704
24 -1.016635 1.061833 0.981137 -0.047605 -0.063650 0.778928
25 -1.093370 1.015536 -0.193033 -0.486717 -0.763605 -0.977680
26 -0.428676 1.178009 1.290983 0.772016 1.623629 0.867341
27 -1.196386 -0.495161 0.531435 0.385220 -1.383020 0.795049
28 0.866731 0.412772 1.289048 -0.014727 1.077496 -0.617061
29 1.237442 0.582034 1.478319 -0.565057 0.802127 -0.509015
... ... ... ... ... ... ...
195 -0.603701 0.935409 -0.419589 -0.931752 0.907517 -1.018120
196 1.708997 -0.221303 -1.454810 -0.881563 -0.670232 -0.206469
197 -0.120947 0.962072 -0.135798 1.668524 0.852748 -1.052456
198 1.023034 0.904205 -0.425604 1.452149 1.440979 2.026447
199 1.607139 1.023377 -0.668664 -0.630504 0.512795 1.087292
200 1.108675 0.450827 -1.313634 -0.450630 -1.386651 1.571188
201 0.806904 1.114638 0.335775 0.794859 0.028131 1.383505
202 1.126699 1.174832 -0.674497 0.392635 -1.533629 0.824473
203 -0.345663 -1.373739 1.109525 0.377449 -2.292182 -1.513942
204 1.158812 0.165993 -1.074239 1.512949 2.035898 0.995373
205 1.365396 -0.199409 -0.349417 -2.286552 -1.926263 0.106827
206 0.988164 -1.144285 -0.272909 -0.868581 -2.124224 1.273870
207 0.905347 -0.717727 0.050759 0.376767 -0.593122 0.072830
208 0.749668 1.003858 -0.003342 2.104938 0.631976 0.706610
209 1.094646 0.741328 -0.142645 2.175202 1.046907 1.820650
210 0.893047 0.990481 0.297007 2.573965 0.840532 0.246122
211 2.012667 0.112112 2.554833 0.422693 0.504618 -0.513686
212 0.509494 -0.082608 1.308021 -0.752580 0.394880 -1.613007
213 0.505972 0.224521 1.805274 -0.533802 1.144542 -2.601310
214 1.135681 1.126084 0.303609 1.072045 0.556683 1.593221
215 0.894848 1.251553 0.035370 1.132716 1.078850 -0.087268
216 0.990624 1.229073 0.307033 1.158121 1.115168 -0.568339
217 -1.693745 0.917833 -1.767095 -0.013187 0.902548 -0.580976
218 -1.788316 1.075356 -1.823314 0.354994 1.353021 -0.863177
219 -1.682127 0.743941 -1.291767 0.306169 1.596294 -0.005674
220 0.705066 0.687717 -1.639219 1.045094 -0.345182 3.118994
221 0.520886 1.211743 0.887971 1.035591 0.420222 1.151952
222 0.456500 -1.350072 -0.027792 -0.269632 -1.081328 0.764550
223 -0.648368 0.201861 0.483292 -0.672521 0.434345 -0.365001
224 0.650931 -1.322808 0.580194 0.873078 -0.583450 -0.803746

225 rows × 6 columns

In [358]:
# Elbow method: within-cluster sum of squares (KMeans inertia_) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep fits one line.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[358]:
[1350.0,
 1124.435731037261,
 969.7111524654798,
 861.1739982448028,
 779.024220761341,
 706.2330662595951,
 649.2198311716684,
 599.2097480238398,
 565.7104737024936,
 525.1010514512898,
 497.51074255236824,
 479.1767694058344,
 448.31267298836974,
 436.24565823492435]
In [359]:
# Elbow plot of the within-cluster sums of squares; look for the "knee"
# to pick the number of clusters.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[359]:
[<matplotlib.lines.Line2D at 0x1e8322a9160>]

K=3

In [360]:
# Final clustering with k=3 (chosen from the elbow plot above).
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[360]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [361]:
kmeans_tc.labels_
Out[361]:
array([0, 2, 2, 2, 2, 2, 0, 1, 1, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 0, 2, 2,
       2, 2, 2, 2, 1, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2,
       2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 1,
       0, 0, 0, 2, 2, 2, 1, 0, 2, 2, 2, 2, 0, 0, 1, 1, 0, 0, 2, 0, 0, 0,
       1, 1, 2, 1, 1, 0, 0, 1, 2, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 0, 0,
       1, 1, 0, 1, 2, 2, 2, 1, 0, 1, 1, 2, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2,
       2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 1, 2, 0, 1, 2, 0, 2, 1,
       2, 1, 1, 2, 2, 1, 0, 2, 1, 0, 1, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2,
       2, 0, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 0, 1,
       1, 1, 0, 1, 1, 2, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2,
       1, 1, 0, 2, 2])
In [362]:
# Cluster assignment for each row of X; since X is the same data the model
# was fitted on, this matches kmeans_tc.labels_ shown above.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[362]:
array([0, 2, 2, 2, 2, 2, 0, 1, 1, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 0, 2, 2,
       2, 2, 2, 2, 1, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2,
       2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 1,
       0, 0, 0, 2, 2, 2, 1, 0, 2, 2, 2, 2, 0, 0, 1, 1, 0, 0, 2, 0, 0, 0,
       1, 1, 2, 1, 1, 0, 0, 1, 2, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 0, 0,
       1, 1, 0, 1, 2, 2, 2, 1, 0, 1, 1, 2, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2,
       2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 1, 2, 0, 1, 2, 0, 2, 1,
       2, 1, 1, 2, 2, 1, 0, 2, 1, 0, 1, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2,
       2, 0, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 0, 1,
       1, 1, 0, 1, 1, 2, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2,
       1, 1, 0, 2, 2])
In [363]:
# Attach the cluster id and the target label to X for cross-tabulation.
# NOTE(review): this mutates X in place — X no longer holds only the six
# tonal-centroid features, so re-running the KMeans cells after this point
# would cluster on 8 columns instead of 6.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [364]:
X
Out[364]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.221298 -0.131733 -0.299437 -1.504299 -0.981450 0.118501 0 0
1 -0.401407 -1.423353 0.879221 0.761177 -0.141123 -1.312558 2 0
2 -0.336789 -0.749510 0.197427 0.350093 -0.148137 0.191187 2 0
3 -0.785803 0.323072 0.152856 -0.630916 1.431384 -1.260729 2 0
4 -0.879329 0.391910 0.348949 -0.626137 1.401315 -0.417223 2 0
5 -1.122300 0.270282 0.525297 1.043848 -0.500903 0.121388 2 0
6 1.854801 -0.110375 0.714529 -0.774181 0.550772 -0.030796 0 0
7 1.233933 0.235407 0.220324 -0.416248 0.185500 0.429491 1 0
8 0.870319 0.678474 0.499988 1.108991 1.281285 0.734961 1 0
9 0.390986 0.860360 -1.381945 0.173292 -0.891547 -0.692921 1 0
10 0.746873 1.087314 0.138873 0.673665 0.999100 1.072527 1 0
11 0.794479 0.967314 -0.363499 1.122351 0.135802 -0.719195 1 0
12 -1.413209 -0.136961 -0.371584 0.036112 -0.471990 -1.250545 2 0
13 -0.072759 -1.149988 1.952271 0.500676 0.052506 -1.006895 2 0
14 -1.273932 -0.022476 -0.393398 0.422307 -1.130763 -0.797664 2 0
15 -1.343760 -0.913734 0.077520 -0.431201 -1.914695 0.581853 0 0
16 -0.984733 -0.428093 1.220123 -0.685684 1.139966 0.712742 2 0
17 0.280303 1.062737 -0.783356 -0.172690 -0.675275 0.261145 1 0
18 0.000660 1.240654 -0.919850 -0.619755 -0.483109 -0.722220 2 0
19 0.174949 1.113514 -2.625056 -0.681779 -0.763682 0.031420 0 0
20 -0.183363 -1.557702 -0.979966 0.553109 1.161043 -0.644240 2 0
21 -0.171420 -1.728398 -0.653886 -0.361851 0.584439 -0.772678 2 0
22 -0.120605 -1.190295 1.627818 0.127251 -0.575228 0.475040 2 0
23 -1.074141 1.086592 0.969446 0.102117 0.044168 1.096704 2 0
24 -1.016635 1.061833 0.981137 -0.047605 -0.063650 0.778928 2 0
25 -1.093370 1.015536 -0.193033 -0.486717 -0.763605 -0.977680 2 0
26 -0.428676 1.178009 1.290983 0.772016 1.623629 0.867341 1 0
27 -1.196386 -0.495161 0.531435 0.385220 -1.383020 0.795049 2 0
28 0.866731 0.412772 1.289048 -0.014727 1.077496 -0.617061 2 0
29 1.237442 0.582034 1.478319 -0.565057 0.802127 -0.509015 2 0
... ... ... ... ... ... ... ... ...
195 -0.603701 0.935409 -0.419589 -0.931752 0.907517 -1.018120 2 1
196 1.708997 -0.221303 -1.454810 -0.881563 -0.670232 -0.206469 0 1
197 -0.120947 0.962072 -0.135798 1.668524 0.852748 -1.052456 1 1
198 1.023034 0.904205 -0.425604 1.452149 1.440979 2.026447 1 1
199 1.607139 1.023377 -0.668664 -0.630504 0.512795 1.087292 1 1
200 1.108675 0.450827 -1.313634 -0.450630 -1.386651 1.571188 0 1
201 0.806904 1.114638 0.335775 0.794859 0.028131 1.383505 1 1
202 1.126699 1.174832 -0.674497 0.392635 -1.533629 0.824473 1 1
203 -0.345663 -1.373739 1.109525 0.377449 -2.292182 -1.513942 2 1
204 1.158812 0.165993 -1.074239 1.512949 2.035898 0.995373 1 1
205 1.365396 -0.199409 -0.349417 -2.286552 -1.926263 0.106827 0 1
206 0.988164 -1.144285 -0.272909 -0.868581 -2.124224 1.273870 0 1
207 0.905347 -0.717727 0.050759 0.376767 -0.593122 0.072830 0 1
208 0.749668 1.003858 -0.003342 2.104938 0.631976 0.706610 1 1
209 1.094646 0.741328 -0.142645 2.175202 1.046907 1.820650 1 1
210 0.893047 0.990481 0.297007 2.573965 0.840532 0.246122 1 1
211 2.012667 0.112112 2.554833 0.422693 0.504618 -0.513686 1 1
212 0.509494 -0.082608 1.308021 -0.752580 0.394880 -1.613007 2 1
213 0.505972 0.224521 1.805274 -0.533802 1.144542 -2.601310 2 1
214 1.135681 1.126084 0.303609 1.072045 0.556683 1.593221 1 1
215 0.894848 1.251553 0.035370 1.132716 1.078850 -0.087268 1 1
216 0.990624 1.229073 0.307033 1.158121 1.115168 -0.568339 1 1
217 -1.693745 0.917833 -1.767095 -0.013187 0.902548 -0.580976 2 1
218 -1.788316 1.075356 -1.823314 0.354994 1.353021 -0.863177 2 1
219 -1.682127 0.743941 -1.291767 0.306169 1.596294 -0.005674 2 1
220 0.705066 0.687717 -1.639219 1.045094 -0.345182 3.118994 1 1
221 0.520886 1.211743 0.887971 1.035591 0.420222 1.151952 1 1
222 0.456500 -1.350072 -0.027792 -0.269632 -1.081328 0.764550 0 1
223 -0.648368 0.201861 0.483292 -0.672521 0.434345 -0.365001 2 1
224 0.650931 -1.322808 0.580194 0.873078 -0.583450 -0.803746 2 1

225 rows × 8 columns

In [365]:
# Count rows per (chosen, Cluster) pair, pivot to one column per 'chosen'
# value, and draw a stacked bar chart: one bar per cluster, split by label.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[365]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e831cecf60>

Chromagram

In [186]:
df_n_ps_std[0].columns
Out[186]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')
In [187]:
df_n_ps_std[0].columns[40:52]
Out[187]:
Index(['tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6'],
      dtype='object')
In [188]:
# Extract the feature columns at iloc positions 40:52 for each company into
# its own DataFrame.
# NOTE(review): the surrounding heading says "Chromagram", but the Out[187]
# check above shows slice 40:52 yields the tonalcentroidfiles_* columns —
# confirm whether the heading or the column range is wrong.
df_n_ps_std_ch = [None]*len(companies)
for i in range(len(companies)):
    # .iloc already carries the column labels, so the original's extra
    # pd.DataFrame(...) wrap and .columns reassignment were redundant;
    # .copy() keeps each frame independent of its source.
    df_n_ps_std_ch[i] = df_n_ps_std[i].iloc[:, 40:52].copy()
df_n_ps_std_ch[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 372 entries, 0 to 371
Data columns (total 6 columns):
tonalcentroidfiles_1    372 non-null float64
tonalcentroidfiles_2    372 non-null float64
tonalcentroidfiles_3    372 non-null float64
tonalcentroidfiles_4    372 non-null float64
tonalcentroidfiles_5    372 non-null float64
tonalcentroidfiles_6    372 non-null float64
dtypes: float64(6)
memory usage: 17.5 KB

Arte Francés

ANN

In [189]:
# Features for the first company (Arte Francés): the 6 standardized columns sliced above.
X = df_n_ps_std_ch[0]
In [190]:
# Binary target: the 'chosen' column (0/1) for the same company.
y = df_n_ps[0]['chosen']
In [191]:
# Hold out a test set (scikit-learn default: 25%).
# NOTE(review): no random_state is set, so the split — and every result
# below — changes on each re-run; consider fixing a seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [192]:
X_train.shape
Out[192]:
(279, 6)
In [193]:
# Base MLP estimator; its hyperparameters are tuned by the grid search below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [194]:
# Candidate hyperparameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but commented out of the grid below
In [195]:
import time
start = time.time()  # wall-clock start (seconds since the Unix epoch), used to time the search

np.random.seed(1234)  # seed the global RNG so the CV search is repeatable
# Hyperparameter grid; batch_size was deliberately left out of the search.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score with both Cohen's kappa and accuracy; the best model is refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this call only runs on older scikit-learn versions; confirm the
# pinned environment before re-running.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [196]:
# Run the exhaustive grid search, then report the winning configuration,
# its cross-validated accuracy and kappa, and the total wall-clock time.
grid.fit(X_train, y_train)

best_kappa = grid.cv_results_['mean_test_kappa'][grid.best_index_] * 100
best_accuracy = grid.best_score_ * 100
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, best_accuracy, best_kappa))
end = time.time()  # timestamp after the search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.02, 'max_iter': 2000}, que permiten obtener un Accuracy de 78.85% y un Kappa del 15.23
Tiempo total: 15.53 minutos
In [207]:
# Pull the winning hyperparameters out of the grid search so the Keras
# model built below mirrors the best scikit-learn MLP.
n0 = X_train.shape[1]              # number of input features

### hidden_layer_sizes
# Layer widths: the best hidden_layer_sizes tuple, converted to a list
# directly (no manual index-copy loop), with a single output unit appended.
ns = list(grid.best_params_['hidden_layer_sizes']) + [1]

lr = grid.best_params_['learning_rate_init']   # initial learning rate
epochs = grid.best_params_['max_iter']         # training-epoch budget
In [208]:
# Keras functional-API input layer sized to the feature count.
input_tensor = Input(shape = (n0,))
In [209]:
# Stack the hidden layers found by the grid search. The activation is
# taken from the search result instead of being hard-coded ('tanh' won
# in this run), so a re-run with a different winner stays consistent
# with how lr/epochs are reused.
best_activation = grid.best_params_['activation']
# sklearn names the sigmoid activation 'logistic'; Keras calls it 'sigmoid'.
if best_activation == 'logistic':
    best_activation = 'sigmoid'

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):          # ns[:-1] are the hidden-layer widths
    hidden_outputs.append(
        Dense(ns[i], activation=best_activation)(hidden_outputs[i]))

# Single sigmoid output unit for the binary cross-entropy objective used below.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [210]:
# Build the model and snapshot its freshly initialized weights so the
# training cell below can restore this exact starting state.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [211]:
# Architecture overview (per the printed summary: 6 -> 30 -> 30 -> 30 -> 1,
# 2,101 trainable parameters).
model.summary()
Model: "model_8"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_8 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_24 (Dense)             (None, 30)                210       
_________________________________________________________________
dense_25 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_26 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_27 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,101
Trainable params: 2,101
Non-trainable params: 0
_________________________________________________________________
In [212]:
# Restore the pre-training weight snapshot so every run of this cell
# starts the network from the same initialization.
model.set_weights(weights)
# NOTE(review): `lr` is the legacy Adam argument name; newer Keras uses
# `learning_rate` — confirm against the pinned Keras version.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the grid-search epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by at least 0.01 for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 279 samples, validate on 93 samples
Epoch 1/2000
279/279 [==============================] - 0s 659us/step - loss: 0.6546 - accuracy: 0.6631 - val_loss: 0.6013 - val_accuracy: 0.7634
Epoch 2/2000
279/279 [==============================] - 0s 57us/step - loss: 0.5470 - accuracy: 0.7634 - val_loss: 0.5640 - val_accuracy: 0.7527
Epoch 3/2000
279/279 [==============================] - 0s 50us/step - loss: 0.5241 - accuracy: 0.7706 - val_loss: 0.5577 - val_accuracy: 0.7527
Epoch 4/2000
279/279 [==============================] - 0s 54us/step - loss: 0.5035 - accuracy: 0.7563 - val_loss: 0.5561 - val_accuracy: 0.7527
Epoch 5/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4957 - accuracy: 0.7706 - val_loss: 0.5495 - val_accuracy: 0.7527
Epoch 6/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4862 - accuracy: 0.7599 - val_loss: 0.5352 - val_accuracy: 0.7527
Epoch 7/2000
279/279 [==============================] - 0s 57us/step - loss: 0.4851 - accuracy: 0.7742 - val_loss: 0.5304 - val_accuracy: 0.7634
Epoch 8/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4595 - accuracy: 0.7778 - val_loss: 0.5112 - val_accuracy: 0.7419
Epoch 9/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4720 - accuracy: 0.7921 - val_loss: 0.5261 - val_accuracy: 0.7527
Epoch 10/2000
279/279 [==============================] - 0s 54us/step - loss: 0.4575 - accuracy: 0.7957 - val_loss: 0.5074 - val_accuracy: 0.7527
Epoch 11/2000
279/279 [==============================] - 0s 50us/step - loss: 0.4290 - accuracy: 0.7921 - val_loss: 0.5122 - val_accuracy: 0.7419

Epoch 00011: ReduceLROnPlateau reducing learning rate to 0.009999999776482582.
Epoch 12/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3950 - accuracy: 0.8208 - val_loss: 0.5183 - val_accuracy: 0.7634
Epoch 13/2000
279/279 [==============================] - 0s 61us/step - loss: 0.3718 - accuracy: 0.8244 - val_loss: 0.4976 - val_accuracy: 0.7634
Epoch 14/2000
279/279 [==============================] - 0s 79us/step - loss: 0.3658 - accuracy: 0.8208 - val_loss: 0.5062 - val_accuracy: 0.7527
Epoch 15/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3475 - accuracy: 0.8459 - val_loss: 0.5192 - val_accuracy: 0.7419
Epoch 16/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3291 - accuracy: 0.8495 - val_loss: 0.5023 - val_accuracy: 0.7419
Epoch 17/2000
279/279 [==============================] - 0s 57us/step - loss: 0.3115 - accuracy: 0.8566 - val_loss: 0.4928 - val_accuracy: 0.7634
Epoch 18/2000
279/279 [==============================] - 0s 64us/step - loss: 0.2897 - accuracy: 0.8674 - val_loss: 0.5426 - val_accuracy: 0.7957
Epoch 19/2000
279/279 [==============================] - 0s 61us/step - loss: 0.2844 - accuracy: 0.8853 - val_loss: 0.4936 - val_accuracy: 0.7957
Epoch 20/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2653 - accuracy: 0.8746 - val_loss: 0.5283 - val_accuracy: 0.7419
Epoch 21/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2578 - accuracy: 0.8889 - val_loss: 0.5283 - val_accuracy: 0.7849
Epoch 22/2000
279/279 [==============================] - 0s 72us/step - loss: 0.2388 - accuracy: 0.9032 - val_loss: 0.5246 - val_accuracy: 0.8065
Epoch 23/2000
279/279 [==============================] - 0s 72us/step - loss: 0.2229 - accuracy: 0.9140 - val_loss: 0.5452 - val_accuracy: 0.7527
Epoch 24/2000
279/279 [==============================] - 0s 57us/step - loss: 0.2068 - accuracy: 0.9247 - val_loss: 0.5425 - val_accuracy: 0.7742
Epoch 25/2000
279/279 [==============================] - 0s 75us/step - loss: 0.1910 - accuracy: 0.9247 - val_loss: 0.6091 - val_accuracy: 0.7742
Epoch 26/2000
279/279 [==============================] - 0s 64us/step - loss: 0.1861 - accuracy: 0.9104 - val_loss: 0.5379 - val_accuracy: 0.7742
Epoch 27/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1750 - accuracy: 0.9319 - val_loss: 0.6090 - val_accuracy: 0.7634
Epoch 28/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1684 - accuracy: 0.9355 - val_loss: 0.6083 - val_accuracy: 0.7419
Epoch 29/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1458 - accuracy: 0.9391 - val_loss: 0.6428 - val_accuracy: 0.7742
Epoch 30/2000
279/279 [==============================] - 0s 61us/step - loss: 0.1362 - accuracy: 0.9355 - val_loss: 0.6483 - val_accuracy: 0.7527
Epoch 31/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1184 - accuracy: 0.9570 - val_loss: 0.6489 - val_accuracy: 0.7312
Epoch 32/2000
279/279 [==============================] - 0s 57us/step - loss: 0.1156 - accuracy: 0.9570 - val_loss: 0.7299 - val_accuracy: 0.7419

Epoch 00032: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 33/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0957 - accuracy: 0.9677 - val_loss: 0.7016 - val_accuracy: 0.7419
Epoch 34/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0896 - accuracy: 0.9821 - val_loss: 0.6892 - val_accuracy: 0.7742
Epoch 35/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0822 - accuracy: 0.9857 - val_loss: 0.7386 - val_accuracy: 0.7527
Epoch 36/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0810 - accuracy: 0.9892 - val_loss: 0.7440 - val_accuracy: 0.7634
Epoch 37/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0737 - accuracy: 0.9892 - val_loss: 0.7301 - val_accuracy: 0.7634
Epoch 38/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0697 - accuracy: 0.9928 - val_loss: 0.7536 - val_accuracy: 0.7742
Epoch 39/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0679 - accuracy: 0.9928 - val_loss: 0.7519 - val_accuracy: 0.7634
Epoch 40/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0633 - accuracy: 0.9892 - val_loss: 0.7982 - val_accuracy: 0.7527
Epoch 41/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0619 - accuracy: 0.9892 - val_loss: 0.7953 - val_accuracy: 0.7742
Epoch 42/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0591 - accuracy: 0.9928 - val_loss: 0.7795 - val_accuracy: 0.7634

Epoch 00042: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 43/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0544 - accuracy: 0.9892 - val_loss: 0.7961 - val_accuracy: 0.7634
Epoch 44/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0498 - accuracy: 0.9928 - val_loss: 0.8283 - val_accuracy: 0.7634
Epoch 45/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0503 - accuracy: 0.9928 - val_loss: 0.8277 - val_accuracy: 0.7849
Epoch 46/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0482 - accuracy: 0.9928 - val_loss: 0.8270 - val_accuracy: 0.7634
Epoch 47/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0465 - accuracy: 0.9928 - val_loss: 0.8391 - val_accuracy: 0.7634
Epoch 48/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0459 - accuracy: 0.9964 - val_loss: 0.8561 - val_accuracy: 0.7419
Epoch 49/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0442 - accuracy: 0.9928 - val_loss: 0.8438 - val_accuracy: 0.7634
Epoch 50/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0435 - accuracy: 0.9964 - val_loss: 0.8739 - val_accuracy: 0.7419
Epoch 51/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0407 - accuracy: 0.9964 - val_loss: 0.8670 - val_accuracy: 0.7634
Epoch 52/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0411 - accuracy: 0.9928 - val_loss: 0.8566 - val_accuracy: 0.7527

Epoch 00052: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 53/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0384 - accuracy: 0.9928 - val_loss: 0.8780 - val_accuracy: 0.7634
Epoch 54/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0374 - accuracy: 0.9964 - val_loss: 0.8862 - val_accuracy: 0.7634
Epoch 55/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0365 - accuracy: 0.9964 - val_loss: 0.8844 - val_accuracy: 0.7634
Epoch 56/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0360 - accuracy: 0.9964 - val_loss: 0.8884 - val_accuracy: 0.7634
Epoch 57/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0353 - accuracy: 0.9964 - val_loss: 0.8878 - val_accuracy: 0.7527
Epoch 58/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0349 - accuracy: 0.9964 - val_loss: 0.8975 - val_accuracy: 0.7527
Epoch 59/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0347 - accuracy: 0.9964 - val_loss: 0.8923 - val_accuracy: 0.7634
Epoch 60/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0337 - accuracy: 0.9964 - val_loss: 0.8985 - val_accuracy: 0.7742
Epoch 61/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0331 - accuracy: 0.9964 - val_loss: 0.9074 - val_accuracy: 0.7742
Epoch 62/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0325 - accuracy: 0.9964 - val_loss: 0.9097 - val_accuracy: 0.7742

Epoch 00062: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 63/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0319 - accuracy: 0.9964 - val_loss: 0.9112 - val_accuracy: 0.7634
Epoch 64/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0316 - accuracy: 0.9964 - val_loss: 0.9168 - val_accuracy: 0.7634
Epoch 65/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0313 - accuracy: 0.9964 - val_loss: 0.9171 - val_accuracy: 0.7634
Epoch 66/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0310 - accuracy: 0.9964 - val_loss: 0.9149 - val_accuracy: 0.7634
Epoch 67/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0306 - accuracy: 0.9964 - val_loss: 0.9193 - val_accuracy: 0.7634
Epoch 68/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0304 - accuracy: 0.9964 - val_loss: 0.9232 - val_accuracy: 0.7634
Epoch 69/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0302 - accuracy: 0.9964 - val_loss: 0.9213 - val_accuracy: 0.7634
Epoch 70/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0299 - accuracy: 0.9964 - val_loss: 0.9208 - val_accuracy: 0.7634
Epoch 71/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0296 - accuracy: 0.9964 - val_loss: 0.9229 - val_accuracy: 0.7634
Epoch 72/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0293 - accuracy: 0.9964 - val_loss: 0.9256 - val_accuracy: 0.7634

Epoch 00072: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 73/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0290 - accuracy: 0.9964 - val_loss: 0.9282 - val_accuracy: 0.7634
Epoch 74/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0289 - accuracy: 0.9964 - val_loss: 0.9280 - val_accuracy: 0.7634
Epoch 75/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0287 - accuracy: 0.9964 - val_loss: 0.9288 - val_accuracy: 0.7634
Epoch 76/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0285 - accuracy: 0.9964 - val_loss: 0.9293 - val_accuracy: 0.7634
Epoch 77/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0285 - accuracy: 0.9964 - val_loss: 0.9310 - val_accuracy: 0.7634
Epoch 78/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0283 - accuracy: 0.9964 - val_loss: 0.9331 - val_accuracy: 0.7634
Epoch 79/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0282 - accuracy: 0.9964 - val_loss: 0.9335 - val_accuracy: 0.7634
Epoch 80/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0280 - accuracy: 1.0000 - val_loss: 0.9355 - val_accuracy: 0.7634
Epoch 81/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0279 - accuracy: 1.0000 - val_loss: 0.9377 - val_accuracy: 0.7634
Epoch 82/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0278 - accuracy: 1.0000 - val_loss: 0.9380 - val_accuracy: 0.7634

Epoch 00082: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 83/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0276 - accuracy: 1.0000 - val_loss: 0.9395 - val_accuracy: 0.7634
Epoch 84/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0276 - accuracy: 1.0000 - val_loss: 0.9393 - val_accuracy: 0.7634
Epoch 85/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0275 - accuracy: 1.0000 - val_loss: 0.9401 - val_accuracy: 0.7634
Epoch 86/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0274 - accuracy: 1.0000 - val_loss: 0.9402 - val_accuracy: 0.7634
Epoch 87/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0273 - accuracy: 1.0000 - val_loss: 0.9403 - val_accuracy: 0.7634
Epoch 88/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0272 - accuracy: 1.0000 - val_loss: 0.9407 - val_accuracy: 0.7634
Epoch 89/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0272 - accuracy: 1.0000 - val_loss: 0.9410 - val_accuracy: 0.7634
Epoch 90/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0271 - accuracy: 1.0000 - val_loss: 0.9408 - val_accuracy: 0.7634
Epoch 91/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0271 - accuracy: 1.0000 - val_loss: 0.9427 - val_accuracy: 0.7634
Epoch 92/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0270 - accuracy: 1.0000 - val_loss: 0.9434 - val_accuracy: 0.7634

Epoch 00092: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 93/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0269 - accuracy: 1.0000 - val_loss: 0.9435 - val_accuracy: 0.7634
Epoch 94/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0269 - accuracy: 1.0000 - val_loss: 0.9433 - val_accuracy: 0.7634
Epoch 95/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9434 - val_accuracy: 0.7634
Epoch 96/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9432 - val_accuracy: 0.7634
Epoch 97/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0268 - accuracy: 1.0000 - val_loss: 0.9438 - val_accuracy: 0.7634
Epoch 98/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9441 - val_accuracy: 0.7634
Epoch 99/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9440 - val_accuracy: 0.7634
Epoch 100/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0267 - accuracy: 1.0000 - val_loss: 0.9444 - val_accuracy: 0.7634
Epoch 101/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0266 - accuracy: 1.0000 - val_loss: 0.9454 - val_accuracy: 0.7634
Epoch 102/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0266 - accuracy: 1.0000 - val_loss: 0.9450 - val_accuracy: 0.7634

Epoch 00102: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 103/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9450 - val_accuracy: 0.7634
Epoch 104/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9451 - val_accuracy: 0.7634
Epoch 105/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9453 - val_accuracy: 0.7634
Epoch 106/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9455 - val_accuracy: 0.7634
Epoch 107/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9456 - val_accuracy: 0.7634
Epoch 108/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0265 - accuracy: 1.0000 - val_loss: 0.9458 - val_accuracy: 0.7634
Epoch 109/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9461 - val_accuracy: 0.7634
Epoch 110/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9462 - val_accuracy: 0.7634
Epoch 111/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9465 - val_accuracy: 0.7634
Epoch 112/2000
279/279 [==============================] - ETA: 0s - loss: 0.0185 - accuracy: 1.00 - 0s 57us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9466 - val_accuracy: 0.7634

Epoch 00112: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 113/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0264 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 114/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 115/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9467 - val_accuracy: 0.7634
Epoch 116/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 117/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 118/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9468 - val_accuracy: 0.7634
Epoch 119/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9470 - val_accuracy: 0.7634
Epoch 120/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9471 - val_accuracy: 0.7634
Epoch 121/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9472 - val_accuracy: 0.7634
Epoch 122/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634

Epoch 00122: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 123/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634
Epoch 124/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9473 - val_accuracy: 0.7634
Epoch 125/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0263 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 126/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 127/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9474 - val_accuracy: 0.7634
Epoch 128/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9475 - val_accuracy: 0.7634
Epoch 129/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9475 - val_accuracy: 0.7634
Epoch 130/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634
Epoch 131/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634
Epoch 132/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9476 - val_accuracy: 0.7634

Epoch 00132: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 133/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 134/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 135/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 136/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 137/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 138/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9477 - val_accuracy: 0.7634
Epoch 139/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 140/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 141/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 142/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634

Epoch 00142: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 143/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 144/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9478 - val_accuracy: 0.7634
Epoch 145/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 146/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 147/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 148/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 149/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 150/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 151/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 152/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634

Epoch 00152: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 153/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 154/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9479 - val_accuracy: 0.7634
Epoch 155/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 156/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 157/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 158/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 159/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 160/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 161/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 162/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00162: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 163/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 164/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 165/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 166/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 167/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 168/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 169/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 170/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 171/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 172/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00172: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 173/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 174/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 175/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 176/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 177/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 178/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 179/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 180/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 181/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 182/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00182: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 183/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 184/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 185/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 186/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 187/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 188/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 189/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 190/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 191/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 192/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00192: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 193/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 194/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 195/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 196/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 197/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 198/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 199/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 200/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 201/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 202/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00202: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 203/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 204/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 205/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 206/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 207/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 208/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 209/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 210/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 211/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 212/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00212: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 213/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 214/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 215/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 216/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 217/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 218/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 219/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 220/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 221/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 222/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00222: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 223/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 224/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 225/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 226/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 227/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 228/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 229/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 230/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 231/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 232/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00232: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 233/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 234/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 235/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 236/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 237/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 238/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 239/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 240/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 241/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 242/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00242: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 243/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 244/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 245/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 246/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 247/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 248/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 249/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 250/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 251/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 252/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00252: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 253/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 254/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 255/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 256/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 257/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 258/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 259/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 260/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 261/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 262/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00262: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 263/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 264/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 265/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 266/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 267/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 268/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 269/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 270/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 271/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 272/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00272: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 273/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 274/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 275/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 276/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 277/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 278/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 279/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 280/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 281/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 282/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00282: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 283/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 284/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 285/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 286/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 287/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 288/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 289/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 290/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 291/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 292/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00292: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 293/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 294/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 295/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 296/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 297/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 298/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 299/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 300/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 301/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 302/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00302: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 303/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 304/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 305/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 306/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 307/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 308/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 309/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 310/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 311/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 312/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00312: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 313/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 314/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 315/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 316/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 317/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 318/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 319/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 320/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 321/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 322/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00322: ReduceLROnPlateau reducing learning rate to 9.313225537987968e-12.
Epoch 323/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 324/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 325/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 326/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 327/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 328/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 329/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 330/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 331/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 332/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00332: ReduceLROnPlateau reducing learning rate to 4.656612768993984e-12.
Epoch 333/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 334/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 335/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 336/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 337/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 338/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 339/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 340/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 341/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 342/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00342: ReduceLROnPlateau reducing learning rate to 2.328306384496992e-12.
Epoch 343/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 344/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 345/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 346/2000
279/279 [==============================] - 0s 168us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 347/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 348/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 349/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 350/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 351/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 352/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00352: ReduceLROnPlateau reducing learning rate to 1.164153192248496e-12.
Epoch 353/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 354/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 355/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 356/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 357/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 358/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 359/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 360/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 361/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 362/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00362: ReduceLROnPlateau reducing learning rate to 5.82076596124248e-13.
Epoch 363/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 364/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 365/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 366/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 367/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 368/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 369/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 370/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 371/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 372/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00372: ReduceLROnPlateau reducing learning rate to 2.91038298062124e-13.
Epoch 373/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 374/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 375/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 376/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 377/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 378/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 379/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 380/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 381/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 382/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00382: ReduceLROnPlateau reducing learning rate to 1.45519149031062e-13.
Epoch 383/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 384/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 385/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 386/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 387/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 388/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 389/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 390/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 391/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 392/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00392: ReduceLROnPlateau reducing learning rate to 7.2759574515531e-14.
Epoch 393/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 394/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 395/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 396/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 397/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 398/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 399/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 400/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 401/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 402/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00402: ReduceLROnPlateau reducing learning rate to 3.63797872577655e-14.
Epoch 403/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 404/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 405/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 406/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 407/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 408/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 409/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 410/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 411/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 412/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00412: ReduceLROnPlateau reducing learning rate to 1.818989362888275e-14.
Epoch 413/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 414/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 415/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 416/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 417/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 418/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 419/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 420/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 421/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 422/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00422: ReduceLROnPlateau reducing learning rate to 9.094946814441375e-15.
Epoch 423/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 424/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 425/2000
279/279 [==============================] - 0s 65us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 426/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 427/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 428/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 429/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 430/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 431/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 432/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00432: ReduceLROnPlateau reducing learning rate to 4.5474734072206875e-15.
Epoch 433/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 434/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 435/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 436/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 437/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 438/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 439/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 440/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 441/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 442/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00442: ReduceLROnPlateau reducing learning rate to 2.2737367036103438e-15.
Epoch 443/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 444/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 445/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 446/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 447/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 448/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 449/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 450/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 451/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 452/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00452: ReduceLROnPlateau reducing learning rate to 1.1368683518051719e-15.
Epoch 453/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 454/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 455/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 456/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 457/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 458/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 459/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 460/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 461/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 462/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00462: ReduceLROnPlateau reducing learning rate to 5.684341759025859e-16.
Epoch 463/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 464/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 465/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 466/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 467/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 468/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 469/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 470/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 471/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 472/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00472: ReduceLROnPlateau reducing learning rate to 2.8421708795129297e-16.
Epoch 473/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 474/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 475/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 476/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 477/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 478/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 479/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 480/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 481/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 482/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00482: ReduceLROnPlateau reducing learning rate to 1.4210854397564648e-16.
Epoch 483/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 484/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 485/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 486/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 487/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 488/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 489/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 490/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 491/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 492/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00492: ReduceLROnPlateau reducing learning rate to 7.105427198782324e-17.
Epoch 493/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 494/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 495/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 496/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 497/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 498/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 499/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 500/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 501/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 502/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00502: ReduceLROnPlateau reducing learning rate to 3.552713599391162e-17.
Epoch 503/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 504/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 505/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 506/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 507/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 508/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 509/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 510/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 511/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 512/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00512: ReduceLROnPlateau reducing learning rate to 1.776356799695581e-17.
Epoch 513/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 514/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 515/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 516/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 517/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 518/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 519/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 520/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 521/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 522/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00522: ReduceLROnPlateau reducing learning rate to 8.881783998477905e-18.
Epoch 523/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 524/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 525/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 526/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 527/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 528/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 529/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 530/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 531/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 532/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00532: ReduceLROnPlateau reducing learning rate to 4.440891999238953e-18.
Epoch 533/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 534/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 535/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 536/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 537/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 538/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 539/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 540/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 541/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 542/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00542: ReduceLROnPlateau reducing learning rate to 2.2204459996194763e-18.
Epoch 543/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 544/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 545/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 546/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 547/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 548/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 549/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 550/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 551/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 552/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00552: ReduceLROnPlateau reducing learning rate to 1.1102229998097382e-18.
Epoch 553/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 554/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 555/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 556/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 557/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 558/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 559/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 560/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 561/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 562/2000
279/279 [==============================] - ETA: 0s - loss: 0.0241 - accuracy: 1.00 - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00562: ReduceLROnPlateau reducing learning rate to 5.551114999048691e-19.
Epoch 563/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 564/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 565/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 566/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 567/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 568/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 569/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 570/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 571/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 572/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00572: ReduceLROnPlateau reducing learning rate to 2.7755574995243454e-19.
Epoch 573/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 574/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 575/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 576/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 577/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 578/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 579/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 580/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 581/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 582/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00582: ReduceLROnPlateau reducing learning rate to 1.3877787497621727e-19.
Epoch 583/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 584/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 585/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 586/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 587/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 588/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 589/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 590/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 591/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 592/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00592: ReduceLROnPlateau reducing learning rate to 6.938893748810864e-20.
Epoch 593/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 594/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 595/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 596/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 597/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 598/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 599/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 600/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 601/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 602/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00602: ReduceLROnPlateau reducing learning rate to 3.469446874405432e-20.
Epoch 603/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 604/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 605/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 606/2000
279/279 [==============================] - 0s 54us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 607/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 608/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 609/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 610/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 611/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 612/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00612: ReduceLROnPlateau reducing learning rate to 1.734723437202716e-20.
Epoch 613/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 614/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 615/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 616/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 617/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 618/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 619/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 620/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 621/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 622/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00622: ReduceLROnPlateau reducing learning rate to 8.67361718601358e-21.
Epoch 623/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 624/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 625/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 626/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 627/2000
279/279 [==============================] - 0s 57us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 628/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 629/2000
279/279 [==============================] - 0s 64us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 630/2000
279/279 [==============================] - 0s 61us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 631/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 632/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00632: ReduceLROnPlateau reducing learning rate to 4.33680859300679e-21.
Epoch 633/2000
279/279 [==============================] - 0s 68us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 634/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 635/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 636/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 637/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 638/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 639/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 640/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 641/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 642/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00642: ReduceLROnPlateau reducing learning rate to 2.168404296503395e-21.
Epoch 643/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 644/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 645/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 646/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 647/2000
279/279 [==============================] - ETA: 0s - loss: 0.0232 - accuracy: 1.00 - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 648/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 649/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 650/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 651/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 652/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00652: ReduceLROnPlateau reducing learning rate to 1.0842021482516974e-21.
Epoch 653/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 654/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 655/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 656/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 657/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 658/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 659/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 660/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 661/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 662/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00662: ReduceLROnPlateau reducing learning rate to 5.421010741258487e-22.
Epoch 663/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 664/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 665/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 666/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 667/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 668/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 669/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 670/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 671/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 672/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00672: ReduceLROnPlateau reducing learning rate to 2.7105053706292436e-22.
Epoch 673/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 674/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 675/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 676/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 677/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 678/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 679/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 680/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 681/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 682/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00682: ReduceLROnPlateau reducing learning rate to 1.3552526853146218e-22.
Epoch 683/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 684/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 685/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 686/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 687/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 688/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 689/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 690/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 691/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 692/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00692: ReduceLROnPlateau reducing learning rate to 6.776263426573109e-23.
Epoch 693/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 694/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 695/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 696/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 697/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 698/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 699/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 700/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 701/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 702/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00702: ReduceLROnPlateau reducing learning rate to 3.3881317132865545e-23.
Epoch 703/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 704/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 705/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 706/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 707/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 708/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 709/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 710/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 711/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 712/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00712: ReduceLROnPlateau reducing learning rate to 1.6940658566432772e-23.
Epoch 713/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 714/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 715/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 716/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 717/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 718/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 719/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 720/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 721/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 722/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00722: ReduceLROnPlateau reducing learning rate to 8.470329283216386e-24.
Epoch 723/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 724/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 725/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 726/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 727/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 728/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 729/2000
279/279 [==============================] - ETA: 0s - loss: 0.0332 - accuracy: 1.00 - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 730/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 731/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 732/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00732: ReduceLROnPlateau reducing learning rate to 4.235164641608193e-24.
Epoch 733/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 734/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 735/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 736/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 737/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 738/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 739/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 740/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 741/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 742/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00742: ReduceLROnPlateau reducing learning rate to 2.1175823208040965e-24.
Epoch 743/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 744/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 745/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 746/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 747/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 748/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 749/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 750/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 751/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 752/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00752: ReduceLROnPlateau reducing learning rate to 1.0587911604020483e-24.
Epoch 753/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 754/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 755/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 756/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 757/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 758/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 759/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 760/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 761/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 762/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00762: ReduceLROnPlateau reducing learning rate to 5.293955802010241e-25.
Epoch 763/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 764/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 765/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 766/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 767/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 768/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 769/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 770/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 771/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 772/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00772: ReduceLROnPlateau reducing learning rate to 2.6469779010051207e-25.
Epoch 773/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 774/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 775/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 776/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 777/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 778/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 779/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 780/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 781/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 782/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00782: ReduceLROnPlateau reducing learning rate to 1.3234889505025603e-25.
Epoch 783/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 784/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 785/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 786/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 787/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 788/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 789/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 790/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 791/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 792/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00792: ReduceLROnPlateau reducing learning rate to 6.617444752512802e-26.
Epoch 793/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 794/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 795/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 796/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 797/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 798/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 799/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 800/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 801/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 802/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00802: ReduceLROnPlateau reducing learning rate to 3.308722376256401e-26.
Epoch 803/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 804/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 805/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 806/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 807/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 808/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 809/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 810/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 811/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 812/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00812: ReduceLROnPlateau reducing learning rate to 1.6543611881282004e-26.
Epoch 813/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 814/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 815/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 816/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 817/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 818/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 819/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 820/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 821/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 822/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00822: ReduceLROnPlateau reducing learning rate to 8.271805940641002e-27.
Epoch 823/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 824/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 825/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 826/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 827/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 828/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 829/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 830/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 831/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 832/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00832: ReduceLROnPlateau reducing learning rate to 4.135902970320501e-27.
Epoch 833/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 834/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 835/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 836/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 837/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 838/2000
279/279 [==============================] - ETA: 0s - loss: 0.0285 - accuracy: 1.00 - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 839/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 840/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 841/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 842/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00842: ReduceLROnPlateau reducing learning rate to 2.0679514851602505e-27.
Epoch 843/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 844/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 845/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 846/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 847/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 848/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 849/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 850/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 851/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 852/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00852: ReduceLROnPlateau reducing learning rate to 1.0339757425801253e-27.
Epoch 853/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 854/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 855/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 856/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 857/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 858/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 859/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 860/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 861/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 862/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00862: ReduceLROnPlateau reducing learning rate to 5.169878712900626e-28.
Epoch 863/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 864/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 865/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 866/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 867/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 868/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 869/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 870/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 871/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 872/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00872: ReduceLROnPlateau reducing learning rate to 2.584939356450313e-28.
Epoch 873/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 874/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 875/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 876/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 877/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 878/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 879/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 880/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 881/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 882/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00882: ReduceLROnPlateau reducing learning rate to 1.2924696782251566e-28.
Epoch 883/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 884/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 885/2000
279/279 [==============================] - ETA: 0s - loss: 0.0114 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 886/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 887/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 888/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 889/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 890/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 891/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 892/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00892: ReduceLROnPlateau reducing learning rate to 6.462348391125783e-29.
Epoch 893/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 894/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 895/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 896/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 897/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 898/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 899/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 900/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 901/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 902/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00902: ReduceLROnPlateau reducing learning rate to 3.2311741955628914e-29.
Epoch 903/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 904/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 905/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 906/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 907/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 908/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 909/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 910/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 911/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 912/2000
279/279 [==============================] - 0s 129us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00912: ReduceLROnPlateau reducing learning rate to 1.6155870977814457e-29.
Epoch 913/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 914/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 915/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 916/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 917/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 918/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 919/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 920/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 921/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 922/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00922: ReduceLROnPlateau reducing learning rate to 8.077935488907229e-30.
Epoch 923/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 924/2000
279/279 [==============================] - ETA: 0s - loss: 0.0248 - accuracy: 1.00 - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 925/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 926/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 927/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 928/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 929/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 930/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 931/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 932/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00932: ReduceLROnPlateau reducing learning rate to 4.038967744453614e-30.
Epoch 933/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 934/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 935/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 936/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 937/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 938/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 939/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 940/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 941/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 942/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00942: ReduceLROnPlateau reducing learning rate to 2.019483872226807e-30.
Epoch 943/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 944/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 945/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 946/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 947/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 948/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 949/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 950/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 951/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 952/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00952: ReduceLROnPlateau reducing learning rate to 1.0097419361134036e-30.
Epoch 953/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 954/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 955/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 956/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 957/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 958/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 959/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 960/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 961/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 962/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00962: ReduceLROnPlateau reducing learning rate to 5.048709680567018e-31.
Epoch 963/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 964/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 965/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 966/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 967/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 968/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 969/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 970/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 971/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 972/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00972: ReduceLROnPlateau reducing learning rate to 2.524354840283509e-31.
Epoch 973/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 974/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 975/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 976/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 977/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 978/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 979/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 980/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 981/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 982/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00982: ReduceLROnPlateau reducing learning rate to 1.2621774201417545e-31.
Epoch 983/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 984/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 985/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 986/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 987/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 988/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 989/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 990/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 991/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 992/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 00992: ReduceLROnPlateau reducing learning rate to 6.310887100708772e-32.
Epoch 993/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 994/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 995/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 996/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 997/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 998/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 999/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1000/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1001/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1002/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01002: ReduceLROnPlateau reducing learning rate to 3.155443550354386e-32.
Epoch 1003/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1004/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1005/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1006/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1007/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1008/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1009/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1010/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1011/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1012/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01012: ReduceLROnPlateau reducing learning rate to 1.577721775177193e-32.
Epoch 1013/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1014/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1015/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1016/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1017/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1018/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1019/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1020/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1021/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1022/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01022: ReduceLROnPlateau reducing learning rate to 7.888608875885965e-33.
Epoch 1023/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1024/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1025/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1026/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1027/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1028/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1029/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1030/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1031/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1032/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01032: ReduceLROnPlateau reducing learning rate to 3.944304437942983e-33.
Epoch 1033/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1034/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1035/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1036/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1037/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1038/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1039/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1040/2000
279/279 [==============================] - 0s 290us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1041/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1042/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01042: ReduceLROnPlateau reducing learning rate to 1.9721522189714914e-33.
Epoch 1043/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1044/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1045/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1046/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1047/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1048/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1049/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1050/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1051/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1052/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01052: ReduceLROnPlateau reducing learning rate to 9.860761094857457e-34.
Epoch 1053/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1054/2000
279/279 [==============================] - 0s 143us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1055/2000
279/279 [==============================] - ETA: 0s - loss: 0.0338 - accuracy: 1.00 - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1056/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1057/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1058/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1059/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1060/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1061/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1062/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01062: ReduceLROnPlateau reducing learning rate to 4.930380547428728e-34.
Epoch 1063/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1064/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1065/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1066/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1067/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1068/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1069/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1070/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1071/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1072/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01072: ReduceLROnPlateau reducing learning rate to 2.465190273714364e-34.
Epoch 1073/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1074/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1075/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1076/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1077/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1078/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1079/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1080/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1081/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1082/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01082: ReduceLROnPlateau reducing learning rate to 1.232595136857182e-34.
Epoch 1083/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1084/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1085/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1086/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1087/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1088/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1089/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1090/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1091/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1092/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01092: ReduceLROnPlateau reducing learning rate to 6.16297568428591e-35.
Epoch 1093/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1094/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1095/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1096/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1097/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1098/2000
279/279 [==============================] - ETA: 0s - loss: 0.0289 - accuracy: 1.00 - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1099/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1100/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1101/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1102/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01102: ReduceLROnPlateau reducing learning rate to 3.081487842142955e-35.
Epoch 1103/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1104/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1105/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1106/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1107/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1108/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1109/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1110/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1111/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1112/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01112: ReduceLROnPlateau reducing learning rate to 1.5407439210714776e-35.
Epoch 1113/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1114/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1115/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1116/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1117/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1118/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1119/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1120/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1121/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1122/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01122: ReduceLROnPlateau reducing learning rate to 7.703719605357388e-36.
Epoch 1123/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1124/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1125/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1126/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1127/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1128/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1129/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1130/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1131/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1132/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01132: ReduceLROnPlateau reducing learning rate to 3.851859802678694e-36.
Epoch 1133/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1134/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1135/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1136/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1137/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1138/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1139/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1140/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1141/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1142/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01142: ReduceLROnPlateau reducing learning rate to 1.925929901339347e-36.
Epoch 1143/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1144/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1145/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1146/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1147/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1148/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1149/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1150/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1151/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1152/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01152: ReduceLROnPlateau reducing learning rate to 9.629649506696735e-37.
Epoch 1153/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1154/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1155/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1156/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1157/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1158/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1159/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1160/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1161/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1162/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01162: ReduceLROnPlateau reducing learning rate to 4.8148247533483676e-37.
Epoch 1163/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1164/2000
279/279 [==============================] - 0s 133us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1165/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1166/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1167/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1168/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1169/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1170/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1171/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1172/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01172: ReduceLROnPlateau reducing learning rate to 2.4074123766741838e-37.
Epoch 1173/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1174/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1175/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1176/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1177/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1178/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1179/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1180/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1181/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1182/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01182: ReduceLROnPlateau reducing learning rate to 1.2037061883370919e-37.
Epoch 1183/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1184/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1185/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1186/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1187/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1188/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1189/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1190/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1191/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1192/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01192: ReduceLROnPlateau reducing learning rate to 6.018530941685459e-38.
Epoch 1193/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1194/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1195/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1196/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1197/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1198/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1199/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1200/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1201/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1202/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01202: ReduceLROnPlateau reducing learning rate to 3.0092654708427297e-38.
Epoch 1203/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1204/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1205/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1206/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1207/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1208/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1209/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1210/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1211/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1212/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01212: ReduceLROnPlateau reducing learning rate to 1.5046327354213649e-38.
Epoch 1213/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1214/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1215/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1216/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1217/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1218/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1219/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1220/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1221/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1222/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01222: ReduceLROnPlateau reducing learning rate to 7.523163677106824e-39.
Epoch 1223/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1224/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1225/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1226/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1227/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1228/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1229/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1230/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1231/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1232/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01232: ReduceLROnPlateau reducing learning rate to 3.761581838553412e-39.
Epoch 1233/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1234/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1235/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1236/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1237/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1238/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1239/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1240/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1241/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1242/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01242: ReduceLROnPlateau reducing learning rate to 1.88079056895209e-39.
Epoch 1243/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1244/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1245/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1246/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1247/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1248/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1249/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1250/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1251/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1252/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01252: ReduceLROnPlateau reducing learning rate to 9.40395284476045e-40.
Epoch 1253/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1254/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1255/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1256/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1257/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1258/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1259/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1260/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1261/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1262/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01262: ReduceLROnPlateau reducing learning rate to 4.701972919134064e-40.
Epoch 1263/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1264/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1265/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1266/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1267/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1268/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1269/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1270/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1271/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1272/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01272: ReduceLROnPlateau reducing learning rate to 2.350986459567032e-40.
Epoch 1273/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1274/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1275/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1276/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1277/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1278/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1279/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1280/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1281/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1282/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01282: ReduceLROnPlateau reducing learning rate to 1.175493229783516e-40.
Epoch 1283/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1284/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1285/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1286/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1287/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1288/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1289/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1290/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1291/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1292/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01292: ReduceLROnPlateau reducing learning rate to 5.87746614891758e-41.
Epoch 1293/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1294/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1295/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1296/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1297/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1298/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1299/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1300/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1301/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1302/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01302: ReduceLROnPlateau reducing learning rate to 2.93873307445879e-41.
Epoch 1303/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1304/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1305/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1306/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1307/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1308/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1309/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1310/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1311/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1312/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01312: ReduceLROnPlateau reducing learning rate to 1.4694015696910032e-41.
Epoch 1313/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1314/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1315/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1316/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1317/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1318/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1319/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1320/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1321/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1322/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01322: ReduceLROnPlateau reducing learning rate to 7.347007848455016e-42.
Epoch 1323/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1324/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1325/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1326/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1327/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1328/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1329/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1330/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1331/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1332/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01332: ReduceLROnPlateau reducing learning rate to 3.673503924227508e-42.
Epoch 1333/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1334/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1335/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1336/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1337/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1338/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1339/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1340/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1341/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1342/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01342: ReduceLROnPlateau reducing learning rate to 1.8371022867298352e-42.
Epoch 1343/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1344/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1345/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1346/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1347/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1348/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1349/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1350/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1351/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1352/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01352: ReduceLROnPlateau reducing learning rate to 9.185511433649176e-43.
Epoch 1353/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1354/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1355/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1356/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1357/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1358/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1359/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1360/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1361/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1362/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01362: ReduceLROnPlateau reducing learning rate to 4.5962589629854e-43.
Epoch 1363/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1364/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1365/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1366/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1367/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1368/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1369/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1370/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1371/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1372/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01372: ReduceLROnPlateau reducing learning rate to 2.2981294814927e-43.
Epoch 1373/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1374/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1375/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1376/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1377/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1378/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1379/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1380/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1381/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1382/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01382: ReduceLROnPlateau reducing learning rate to 1.14906474074635e-43.
Epoch 1383/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1384/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1385/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1386/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1387/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1388/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1389/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1390/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1391/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1392/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01392: ReduceLROnPlateau reducing learning rate to 5.74532370373175e-44.
Epoch 1393/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1394/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1395/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1396/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1397/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1398/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1399/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1400/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1401/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1402/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01402: ReduceLROnPlateau reducing learning rate to 2.872661851865875e-44.
Epoch 1403/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1404/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1405/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1406/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1407/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1408/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1409/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1410/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1411/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1412/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01412: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-44.
Epoch 1413/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1414/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1415/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1416/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1417/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1418/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1419/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1420/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1421/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1422/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01422: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-45.
Epoch 1423/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1424/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1425/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1426/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1427/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1428/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1429/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1430/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1431/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1432/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01432: ReduceLROnPlateau reducing learning rate to 3.5032461608120427e-45.
Epoch 1433/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1434/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1435/2000
279/279 [==============================] - 0s 122us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1436/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1437/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1438/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1439/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1440/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1441/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1442/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01442: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1443/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1444/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1445/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1446/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1447/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1448/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1449/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1450/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1451/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1452/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634

Epoch 01452: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1453/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1454/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1455/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1456/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1457/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1458/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1459/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1460/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1461/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1462/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1463/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1464/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1465/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1466/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1467/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1468/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1469/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1470/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1471/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1472/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1473/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1474/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1475/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1476/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1477/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1478/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1479/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1480/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1481/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1482/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1483/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1484/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1485/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1486/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1487/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1488/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1489/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1490/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1491/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1492/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1493/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1494/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1495/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1496/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1497/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1498/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1499/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1500/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1501/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1502/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1503/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1504/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1505/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1506/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1507/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1508/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1509/2000
279/279 [==============================] - 0s 125us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1510/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1511/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1512/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1513/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1514/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1515/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1516/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1517/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1518/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1519/2000
279/279 [==============================] - ETA: 0s - loss: 0.0344 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1520/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1521/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1522/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1523/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1524/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1525/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1526/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1527/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1528/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1529/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1530/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1531/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1532/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1533/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1534/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1535/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1536/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1537/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1538/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1539/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1540/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1541/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1542/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1543/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1544/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1545/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1546/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1547/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1548/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1549/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1550/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1551/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1552/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1553/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1554/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1555/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1556/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1557/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1558/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1559/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1560/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1561/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1562/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1563/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1564/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1565/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1566/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1567/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1568/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1569/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1570/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1571/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1572/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1573/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1574/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1575/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1576/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1577/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1578/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1579/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1580/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1581/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1582/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1583/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1584/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1585/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1586/2000
279/279 [==============================] - ETA: 0s - loss: 0.0197 - accuracy: 1.00 - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1587/2000
279/279 [==============================] - 0s 147us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1588/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1589/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1590/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1591/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1592/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1593/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1594/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1595/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1596/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1597/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1598/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1599/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1600/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1601/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1602/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1603/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1604/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1605/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1606/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1607/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1608/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1609/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1610/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1611/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1612/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1613/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1614/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1615/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1616/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1617/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1618/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1619/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1620/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1621/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1622/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1623/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1624/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1625/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1626/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1627/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1628/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1629/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1630/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1631/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1632/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1633/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1634/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1635/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1636/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1637/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1638/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1639/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1640/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1641/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1642/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1643/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1644/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1645/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1646/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1647/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1648/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1649/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1650/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1651/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1652/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1653/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1654/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1655/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1656/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1657/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1658/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1659/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1660/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1661/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1662/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1663/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1664/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1665/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1666/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1667/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1668/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1669/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1670/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1671/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1672/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1673/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1674/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1675/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1676/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1677/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1678/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1679/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1680/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1681/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1682/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1683/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1684/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1685/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1686/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1687/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1688/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1689/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1690/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1691/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1692/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1693/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1694/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1695/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1696/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1697/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1698/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1699/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1700/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1701/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1702/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1703/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1704/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1705/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1706/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1707/2000
279/279 [==============================] - 0s 72us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1708/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1709/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1710/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1711/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1712/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1713/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1714/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1715/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1716/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1717/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1718/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1719/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1720/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1721/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1722/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1723/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1724/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1725/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1726/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1727/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1728/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1729/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1730/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1731/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1732/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1733/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1734/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1735/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1736/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1737/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1738/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1739/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1740/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1741/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1742/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1743/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1744/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1745/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1746/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1747/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1748/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1749/2000
279/279 [==============================] - ETA: 0s - loss: 0.0269 - accuracy: 1.00 - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1750/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1751/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1752/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1753/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1754/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1755/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1756/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1757/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1758/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1759/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1760/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1761/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1762/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1763/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1764/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1765/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1766/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1767/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1768/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1769/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1770/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1771/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1772/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1773/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1774/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1775/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1776/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1777/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1778/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1779/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1780/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1781/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1782/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1783/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1784/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1785/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1786/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1787/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1788/2000
279/279 [==============================] - 0s 118us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1789/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1790/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1791/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1792/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1793/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1794/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1795/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1796/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1797/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1798/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1799/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1800/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1801/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1802/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1803/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1804/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1805/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1806/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1807/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1808/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1809/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1810/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1811/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1812/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1813/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1814/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1815/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1816/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1817/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1818/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1819/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1820/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1821/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1822/2000
279/279 [==============================] - 0s 154us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1823/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1824/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1825/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1826/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1827/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1828/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1829/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1830/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1831/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1832/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1833/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1834/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1835/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1836/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1837/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1838/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1839/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1840/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1841/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1842/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1843/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1844/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1845/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1846/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1847/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1848/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1849/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1850/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1851/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1852/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1853/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1854/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1855/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1856/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1857/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1858/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1859/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1860/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1861/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1862/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1863/2000
279/279 [==============================] - 0s 115us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1864/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1865/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1866/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1867/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1868/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1869/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1870/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1871/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1872/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1873/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1874/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1875/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1876/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1877/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1878/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1879/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1880/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1881/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1882/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1883/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1884/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1885/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1886/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1887/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1888/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1889/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1890/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1891/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1892/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1893/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1894/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1895/2000
279/279 [==============================] - 0s 100us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1896/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1897/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1898/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1899/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1900/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1901/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1902/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1903/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1904/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1905/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1906/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1907/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1908/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1909/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1910/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1911/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1912/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1913/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1914/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1915/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1916/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1917/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1918/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1919/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1920/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1921/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1922/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1923/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1924/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1925/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1926/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1927/2000
279/279 [==============================] - ETA: 0s - loss: 0.0263 - accuracy: 1.00 - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1928/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1929/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1930/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1931/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1932/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1933/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1934/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1935/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1936/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1937/2000
279/279 [==============================] - 0s 107us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1938/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1939/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1940/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1941/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1942/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1943/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1944/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1945/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1946/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1947/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1948/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1949/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1950/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1951/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1952/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1953/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1954/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1955/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1956/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1957/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1958/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1959/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1960/2000
279/279 [==============================] - 0s 97us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1961/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1962/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1963/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1964/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1965/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1966/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1967/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1968/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1969/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1970/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1971/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1972/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1973/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1974/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1975/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1976/2000
279/279 [==============================] - 0s 111us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1977/2000
279/279 [==============================] - 0s 104us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1978/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1979/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1980/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1981/2000
279/279 [==============================] - 0s 75us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1982/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1983/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1984/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1985/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1986/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1987/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1988/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1989/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1990/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1991/2000
279/279 [==============================] - 0s 93us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1992/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1993/2000
279/279 [==============================] - 0s 86us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1994/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1995/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1996/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1997/2000
279/279 [==============================] - 0s 82us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1998/2000
279/279 [==============================] - 0s 88us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 1999/2000
279/279 [==============================] - 0s 90us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
Epoch 2000/2000
279/279 [==============================] - 0s 79us/step - loss: 0.0262 - accuracy: 1.0000 - val_loss: 0.9480 - val_accuracy: 0.7634
In [213]:
# Plot the Keras training history: accuracy and loss curves for train vs. validation.
# (With val_accuracy stuck at 0.7634 and train accuracy at 1.0, these curves make
# the overfitting visible.)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per recorded epoch.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 2000)
In [214]:
# Score the trained network on the held-out test set.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
93/93 [==============================] - 0s 54us/step
test loss: 0.9480327226782358, test accuracy: 0.7634408473968506
In [215]:
# Turn the network's probability outputs into hard 0/1 labels and report
# chance-corrected agreement (Cohen's kappa) and ranking quality (AUC ROC).
#
# Vectorized thresholding replaces the previous per-row
# `int(i >= 0.5)` lambda, which called int() on size-1 ndarrays — a pattern
# deprecated in NumPy >= 1.25. `.ravel().tolist()` yields the same flat list
# of ints the original produced.
y_pred = model.predict(X_test)
y_pred = (y_pred >= 0.5).astype(int).ravel().tolist()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
Kappa:  0.28059071729957796
AUC ROC:  0.6239130434782609

KMeans

In [216]:
X
Out[216]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.574572 0.885785 -0.744040 1.222732 -1.020937 0.253925
1 1.110821 0.925768 0.226451 -0.119687 0.143401 -0.009902
2 -0.106107 1.808295 1.961049 1.107464 2.076449 1.966210
3 0.083078 1.385239 1.506771 1.294360 1.104665 2.105667
4 -0.164331 0.169248 0.525026 1.442347 1.710639 0.657093
5 -0.601767 0.100025 2.894764 -1.234721 1.277722 2.254758
6 -0.436923 -0.300019 1.146480 0.730610 -1.634206 -0.621955
7 -0.587280 1.210009 0.829948 0.235398 -0.830262 0.277419
8 -0.072320 0.439239 -0.084262 0.666161 -0.979709 0.134482
9 0.177494 0.386052 0.266785 -1.461050 1.702079 -0.627335
10 0.473878 0.893926 -0.138418 -0.267275 -0.407548 -0.229186
11 0.245249 0.482974 0.995106 0.679754 0.235560 0.480101
12 0.011981 -0.373717 -0.589054 0.487517 -1.428960 0.073724
13 0.190041 -0.273603 0.483229 0.925167 -1.268062 -0.057357
14 -1.064192 -0.043564 -1.313412 -1.204309 1.571772 -1.751836
15 -0.197842 0.773898 0.917595 -0.533388 1.883323 -0.803595
16 -0.610344 0.615674 0.638901 -1.525221 1.272377 -1.132221
17 0.804488 -1.479012 0.374228 0.166272 -1.743433 -1.002346
18 0.490692 -1.982800 -0.516405 -0.202546 -1.666137 -1.170162
19 0.592053 -1.492906 0.420008 -0.901877 -2.327543 -0.070308
20 -1.787738 1.285484 -0.785859 -2.380832 -1.022434 -0.395605
21 -1.742165 -0.033766 -0.693835 -0.641834 0.381590 -1.954523
22 -1.729041 0.173705 -1.150118 -0.130491 -1.173120 -1.443805
23 0.437142 1.722799 -2.129021 -2.481456 -0.156650 0.254809
24 1.350380 0.970678 0.076009 -0.404025 -1.384857 0.117089
25 0.496482 -0.133100 -0.887460 0.472889 -1.490365 1.615562
26 0.364827 1.228853 -0.931602 -0.240277 -0.555015 1.259771
27 1.022426 1.569202 -1.345165 -1.077121 -0.192695 0.678057
28 0.458228 1.620487 -0.211045 -1.256812 0.846741 -0.038512
29 0.607951 1.683390 -0.591685 -0.243413 -0.937265 2.063508
... ... ... ... ... ... ...
342 0.736233 -1.887137 0.319724 -0.489954 -0.346230 -0.577137
343 2.234360 -0.046651 1.657368 -1.085388 0.721768 -1.665035
344 0.911113 0.039319 -1.297534 0.592375 -0.241987 2.261413
345 1.384636 -0.476054 -1.183101 0.367974 0.257066 1.378080
346 0.961181 0.071015 -0.810140 0.868325 0.332780 1.588246
347 0.937051 -1.450311 -0.319702 -0.228450 0.739750 -0.449282
348 -3.281761 0.179415 -1.446642 -0.685115 -0.600794 0.658153
349 -2.344820 0.180118 0.106773 0.037180 -0.877178 1.123398
350 1.460135 0.906456 -0.147713 0.343900 -0.620485 0.700956
351 2.114961 0.965145 -1.378351 -0.574489 -1.693320 -0.016307
352 1.504768 0.846661 -1.860231 0.705179 -1.292241 0.593433
353 -0.213580 0.437840 0.427356 1.095762 -1.001309 -0.054969
354 -0.063245 0.024794 0.060996 0.129995 -0.599389 0.500376
355 0.116032 0.004823 0.235041 0.085659 -0.431386 0.814703
356 -0.330558 0.187426 -0.234587 0.942953 -0.434096 0.203910
357 0.380368 0.608654 -0.308912 1.154966 -0.255135 0.167498
358 0.893056 0.389669 -0.426997 0.638788 -0.700280 0.368667
359 -1.019517 -2.497618 0.166376 1.273368 0.313702 -0.420230
360 -0.784247 -1.148191 -0.802374 1.038236 -0.602589 -0.913446
361 0.139494 -2.036594 -1.137199 -0.379348 -0.257913 0.506162
362 1.494839 -2.022204 -0.164524 -2.180060 0.002713 1.550614
363 3.128156 -1.231830 0.035160 -2.785380 -0.610055 0.724620
364 2.010326 -1.482568 -1.697983 -2.728569 -0.765820 2.873139
365 1.499390 0.641291 -0.739018 -1.456660 -0.760400 -0.452027
366 1.398136 1.715250 -0.369182 -1.280480 -0.150680 -0.884280
367 1.103318 0.778728 -0.851121 -1.368219 0.142626 -0.918794
368 0.303168 0.188358 0.095953 -0.024506 -0.709672 -1.109607
369 1.183673 0.747660 -0.209307 0.329011 -1.151082 -0.726250
370 -0.723654 -0.290377 1.173636 -0.123624 1.997744 -0.687810
371 -0.073840 1.011128 0.445136 0.821330 -0.338478 -0.694080

372 rows × 6 columns

In [217]:
# Within-cluster sum of squares (inertia) for K = 1..14, used for the elbow plot.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[217]:
[2232.0,
 1860.320016741722,
 1609.923817414493,
 1413.3293251834796,
 1299.2594621943642,
 1187.9595776711437,
 1107.911820622218,
 1042.5800837127117,
 995.2457858732439,
 962.2198115266042,
 937.080734625301,
 898.72618782214,
 866.9993526105154,
 833.7799456003584]
In [218]:
# Elbow plot of the inertias computed above; the bend suggests K = 4.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow curve for K-Means')
plt.xlabel('Number of clusters K')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[218]:
[<matplotlib.lines.Line2D at 0x1e82f9e3828>]

K=4

In [219]:
# Fit K-Means with K=4 (chosen from the elbow plot above) on the
# tonal-centroid feature frame X; random_state pins the centroid seeding.
kmeans_ch = KMeans(n_clusters=4, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[219]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [220]:
kmeans_ch.labels_
Out[220]:
array([2, 2, 0, 0, 0, 0, 1, 0, 0, 3, 2, 0, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3,
       1, 3, 2, 2, 2, 2, 3, 2, 0, 0, 3, 3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 3,
       3, 3, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 2, 2, 2, 3, 1, 3, 1, 3, 3,
       0, 0, 0, 3, 3, 3, 3, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 3, 3, 0, 0,
       2, 1, 0, 3, 2, 1, 1, 2, 0, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0,
       3, 0, 0, 0, 3, 0, 2, 2, 2, 0, 3, 2, 0, 3, 2, 3, 1, 1, 1, 0, 2, 0,
       3, 3, 3, 3, 1, 2, 0, 1, 0, 1, 1, 2, 1, 0, 1, 1, 0, 0, 0, 3, 0, 2,
       0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1, 1, 2, 2, 1, 3, 2, 3, 0, 1, 1, 2,
       0, 1, 1, 1, 1, 2, 1, 2, 2, 3, 3, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 1,
       2, 1, 1, 3, 0, 1, 2, 3, 2, 2, 3, 2, 0, 2, 2, 1, 0, 1, 0, 1, 2, 1,
       2, 3, 3, 3, 2, 2, 2, 1, 2, 1, 1, 2, 1, 2, 2, 3, 2, 3, 3, 3, 3, 3,
       3, 3, 3, 1, 0, 0, 2, 2, 2, 1, 2, 2, 3, 3, 3, 1, 2, 2, 3, 3, 3, 3,
       2, 1, 1, 1, 3, 3, 1, 1, 1, 2, 2, 1, 3, 2, 0, 2, 2, 3, 3, 3, 3, 1,
       0, 2, 2, 3, 2, 0, 3, 1, 3, 2, 1, 0, 0, 3, 0, 0, 0, 2, 0, 1, 1, 0,
       0, 0, 3, 0, 0, 1, 1, 3, 1, 1, 3, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0,
       0, 3, 0, 0, 0, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2,
       2, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 2, 2, 3, 3, 1, 2, 3, 0])
In [221]:
# Cluster assignment for every row of X. Since X is exactly the data the model
# was fitted on, `kmeans_ch.predict(X)` returns `kmeans_ch.labels_` — reuse the
# stored labels instead of recomputing all point-to-centroid distances.
clusters_ch = kmeans_ch.labels_
clusters_ch
Out[221]:
array([2, 2, 0, 0, 0, 0, 1, 0, 0, 3, 2, 0, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3,
       1, 3, 2, 2, 2, 2, 3, 2, 0, 0, 3, 3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 3,
       3, 3, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 2, 2, 2, 3, 1, 3, 1, 3, 3,
       0, 0, 0, 3, 3, 3, 3, 2, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 3, 3, 0, 0,
       2, 1, 0, 3, 2, 1, 1, 2, 0, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0,
       3, 0, 0, 0, 3, 0, 2, 2, 2, 0, 3, 2, 0, 3, 2, 3, 1, 1, 1, 0, 2, 0,
       3, 3, 3, 3, 1, 2, 0, 1, 0, 1, 1, 2, 1, 0, 1, 1, 0, 0, 0, 3, 0, 2,
       0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1, 1, 2, 2, 1, 3, 2, 3, 0, 1, 1, 2,
       0, 1, 1, 1, 1, 2, 1, 2, 2, 3, 3, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 1,
       2, 1, 1, 3, 0, 1, 2, 3, 2, 2, 3, 2, 0, 2, 2, 1, 0, 1, 0, 1, 2, 1,
       2, 3, 3, 3, 2, 2, 2, 1, 2, 1, 1, 2, 1, 2, 2, 3, 2, 3, 3, 3, 3, 3,
       3, 3, 3, 1, 0, 0, 2, 2, 2, 1, 2, 2, 3, 3, 3, 1, 2, 2, 3, 3, 3, 3,
       2, 1, 1, 1, 3, 3, 1, 1, 1, 2, 2, 1, 3, 2, 0, 2, 2, 3, 3, 3, 3, 1,
       0, 2, 2, 3, 2, 0, 3, 1, 3, 2, 1, 0, 0, 3, 0, 0, 0, 2, 0, 1, 1, 0,
       0, 0, 3, 0, 0, 1, 1, 3, 1, 1, 3, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0,
       0, 3, 0, 0, 0, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2,
       2, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 2, 2, 3, 3, 1, 2, 3, 0])
In [222]:
# Attach the cluster assignment and the target label as new columns.
# NOTE(review): this mutates the feature frame X in place — any later cell that
# expects X to contain only the six tonal-centroid features will now see eight
# columns. A separate annotated copy would be safer.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [223]:
X
Out[223]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.574572 0.885785 -0.744040 1.222732 -1.020937 0.253925 2 0
1 1.110821 0.925768 0.226451 -0.119687 0.143401 -0.009902 2 0
2 -0.106107 1.808295 1.961049 1.107464 2.076449 1.966210 0 0
3 0.083078 1.385239 1.506771 1.294360 1.104665 2.105667 0 0
4 -0.164331 0.169248 0.525026 1.442347 1.710639 0.657093 0 0
5 -0.601767 0.100025 2.894764 -1.234721 1.277722 2.254758 0 0
6 -0.436923 -0.300019 1.146480 0.730610 -1.634206 -0.621955 1 0
7 -0.587280 1.210009 0.829948 0.235398 -0.830262 0.277419 0 0
8 -0.072320 0.439239 -0.084262 0.666161 -0.979709 0.134482 0 0
9 0.177494 0.386052 0.266785 -1.461050 1.702079 -0.627335 3 0
10 0.473878 0.893926 -0.138418 -0.267275 -0.407548 -0.229186 2 0
11 0.245249 0.482974 0.995106 0.679754 0.235560 0.480101 0 0
12 0.011981 -0.373717 -0.589054 0.487517 -1.428960 0.073724 1 0
13 0.190041 -0.273603 0.483229 0.925167 -1.268062 -0.057357 1 0
14 -1.064192 -0.043564 -1.313412 -1.204309 1.571772 -1.751836 3 0
15 -0.197842 0.773898 0.917595 -0.533388 1.883323 -0.803595 3 0
16 -0.610344 0.615674 0.638901 -1.525221 1.272377 -1.132221 3 0
17 0.804488 -1.479012 0.374228 0.166272 -1.743433 -1.002346 1 0
18 0.490692 -1.982800 -0.516405 -0.202546 -1.666137 -1.170162 1 0
19 0.592053 -1.492906 0.420008 -0.901877 -2.327543 -0.070308 1 0
20 -1.787738 1.285484 -0.785859 -2.380832 -1.022434 -0.395605 3 0
21 -1.742165 -0.033766 -0.693835 -0.641834 0.381590 -1.954523 3 0
22 -1.729041 0.173705 -1.150118 -0.130491 -1.173120 -1.443805 1 0
23 0.437142 1.722799 -2.129021 -2.481456 -0.156650 0.254809 3 0
24 1.350380 0.970678 0.076009 -0.404025 -1.384857 0.117089 2 0
25 0.496482 -0.133100 -0.887460 0.472889 -1.490365 1.615562 2 0
26 0.364827 1.228853 -0.931602 -0.240277 -0.555015 1.259771 2 0
27 1.022426 1.569202 -1.345165 -1.077121 -0.192695 0.678057 2 0
28 0.458228 1.620487 -0.211045 -1.256812 0.846741 -0.038512 3 0
29 0.607951 1.683390 -0.591685 -0.243413 -0.937265 2.063508 2 0
... ... ... ... ... ... ... ... ...
342 0.736233 -1.887137 0.319724 -0.489954 -0.346230 -0.577137 1 1
343 2.234360 -0.046651 1.657368 -1.085388 0.721768 -1.665035 3 1
344 0.911113 0.039319 -1.297534 0.592375 -0.241987 2.261413 2 1
345 1.384636 -0.476054 -1.183101 0.367974 0.257066 1.378080 2 1
346 0.961181 0.071015 -0.810140 0.868325 0.332780 1.588246 2 1
347 0.937051 -1.450311 -0.319702 -0.228450 0.739750 -0.449282 1 1
348 -3.281761 0.179415 -1.446642 -0.685115 -0.600794 0.658153 0 1
349 -2.344820 0.180118 0.106773 0.037180 -0.877178 1.123398 0 1
350 1.460135 0.906456 -0.147713 0.343900 -0.620485 0.700956 2 1
351 2.114961 0.965145 -1.378351 -0.574489 -1.693320 -0.016307 2 1
352 1.504768 0.846661 -1.860231 0.705179 -1.292241 0.593433 2 1
353 -0.213580 0.437840 0.427356 1.095762 -1.001309 -0.054969 0 1
354 -0.063245 0.024794 0.060996 0.129995 -0.599389 0.500376 0 1
355 0.116032 0.004823 0.235041 0.085659 -0.431386 0.814703 0 1
356 -0.330558 0.187426 -0.234587 0.942953 -0.434096 0.203910 0 1
357 0.380368 0.608654 -0.308912 1.154966 -0.255135 0.167498 2 1
358 0.893056 0.389669 -0.426997 0.638788 -0.700280 0.368667 2 1
359 -1.019517 -2.497618 0.166376 1.273368 0.313702 -0.420230 1 1
360 -0.784247 -1.148191 -0.802374 1.038236 -0.602589 -0.913446 1 1
361 0.139494 -2.036594 -1.137199 -0.379348 -0.257913 0.506162 1 1
362 1.494839 -2.022204 -0.164524 -2.180060 0.002713 1.550614 2 1
363 3.128156 -1.231830 0.035160 -2.785380 -0.610055 0.724620 2 1
364 2.010326 -1.482568 -1.697983 -2.728569 -0.765820 2.873139 2 1
365 1.499390 0.641291 -0.739018 -1.456660 -0.760400 -0.452027 2 1
366 1.398136 1.715250 -0.369182 -1.280480 -0.150680 -0.884280 3 1
367 1.103318 0.778728 -0.851121 -1.368219 0.142626 -0.918794 3 1
368 0.303168 0.188358 0.095953 -0.024506 -0.709672 -1.109607 1 1
369 1.183673 0.747660 -0.209307 0.329011 -1.151082 -0.726250 2 1
370 -0.723654 -0.290377 1.173636 -0.123624 1.997744 -0.687810 3 1
371 -0.073840 1.011128 0.445136 0.821330 -0.338478 -0.694080 0 1

372 rows × 8 columns

In [224]:
# Count rows per (chosen, Cluster) pair, pivot to one row per cluster with one
# column per class, and draw a stacked bar chart of class composition by cluster.
# NOTE(review): the variable name `stacked` collides visually with the
# `stacked=True` keyword below — consider renaming if these cells are revisited.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[224]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82fa1f9e8>
In [206]:
from IPython.display import display, Markdown, Latex

# Render the company name as a level-2 markdown heading in the notebook.
heading = Markdown('## ' + companies[1])
display(heading)

Club De Banqueros y Empresarios

ANN

In [225]:
# Feature matrix for this company (index 1). NOTE(review): the name suggests
# df_n_ps_std_ch holds standardised per-company feature frames — confirm upstream.
X = df_n_ps_std_ch[1]
In [226]:
# Target labels: the 'chosen' column from the matching (un-standardised) frame.
y = df_n_ps[1]['chosen']
In [227]:
# Hold out a test set. random_state pins the otherwise-random shuffle so the
# split — and everything trained on it — is reproducible on a fresh
# Restart & Run All (1234 matches the seed used elsewhere in this notebook).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [228]:
# Sanity-check the training split: (n_samples, n_features).
X_train.shape
Out[228]:
(191, 6)
In [229]:
# Base estimator for the grid search below. random_state makes weight
# initialisation — and therefore the search outcome — reproducible. The
# hidden_layer_sizes given here are a placeholder: the grid overrides them.
mlp = MLPClassifier(hidden_layer_sizes=(30, 30, 30), random_state=1234)
In [230]:
# Candidate hyper-parameter values to explore in the grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but left out of the grid (see the search cell)
In [231]:
import time
start = time.time()  # Returns the current time in seconds since Jan 1st 1970 (the epoch)

np.random.seed(1234)  # seed NumPy's global RNG before the search, for reproducibility
# Hyper-parameter grid for the MLP. batch_size was deliberately left commented
# out, presumably to keep the search tractable — confirm before re-enabling.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track Cohen's kappa alongside accuracy; refit the final model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24 —
# drop this argument when upgrading scikit-learn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [232]:
# Run the exhaustive search (this is the expensive cell — ~27 minutes here).
grid.fit(X_train, y_train)

# Report the winning hyper-parameters with their cross-validated accuracy and
# Cohen's kappa (the printed messages are intentionally in Spanish).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time measured right after the model finished training
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.004, 'max_iter': 400}, que permiten obtener un Accuracy de 77.49% y un Kappa del 29.13
Tiempo total: 27.26 minutos
In [233]:
n0 = X_train.shape[1]  # input dimension for the Keras model below

### hidden_layer_sizes
# Rebuild the winning topology as a flat list of layer widths, terminated by a
# single output unit for the binary 'chosen' target. (list() replaces the
# original element-by-element copy loop — same result, idiomatic form.)
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [234]:
# Keras functional-API input layer, sized to the feature count.
input_tensor = Input(shape = (n0,))
In [235]:
# Build the hidden stack replicating the grid-search winner. The grid searched
# the activation too, so reuse its choice here instead of hard-coding 'tanh'
# (the original inconsistently ignored the searched activation). sklearn's
# 'logistic' is called 'sigmoid' in Keras; 'tanh' and 'relu' match directly.
keras_activation = {'logistic': 'sigmoid', 'tanh': 'tanh', 'relu': 'relu'}[grid.best_params_['activation']]

hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=keras_activation)(hidden_outputs[i]))

# Single sigmoid output unit for the binary 'chosen' target.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [236]:
# Assemble the functional model and snapshot its freshly initialised weights,
# so a later cell can reset the network to this state before (re)training.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [237]:
# Inspect the architecture and parameter counts.
model.summary()
Model: "model_9"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_9 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_28 (Dense)             (None, 10)                70        
_________________________________________________________________
dense_29 (Dense)             (None, 1)                 11        
=================================================================
Total params: 81
Trainable params: 81
Non-trainable params: 0
_________________________________________________________________
In [238]:
model.set_weights(weights)  # reset to the weights captured right after construction, so reruns start fresh
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` was renamed `learning_rate` in TF2 — update on upgrade
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train for the epoch count found by the grid search; halve the learning rate
# whenever validation accuracy fails to improve by 0.01 for 10 epochs.
# NOTE(review): batch_size is hard-coded to 32 here — the grid did not tune it.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/400
191/191 [==============================] - 0s 637us/step - loss: 0.7698 - accuracy: 0.5236 - val_loss: 0.7665 - val_accuracy: 0.4844
Epoch 2/400
191/191 [==============================] - 0s 68us/step - loss: 0.7283 - accuracy: 0.5497 - val_loss: 0.7359 - val_accuracy: 0.4844
Epoch 3/400
191/191 [==============================] - 0s 73us/step - loss: 0.6917 - accuracy: 0.5602 - val_loss: 0.7134 - val_accuracy: 0.4844
Epoch 4/400
191/191 [==============================] - 0s 73us/step - loss: 0.6639 - accuracy: 0.5969 - val_loss: 0.6940 - val_accuracy: 0.5156
Epoch 5/400
191/191 [==============================] - 0s 68us/step - loss: 0.6388 - accuracy: 0.6387 - val_loss: 0.6769 - val_accuracy: 0.5312
Epoch 6/400
191/191 [==============================] - 0s 73us/step - loss: 0.6195 - accuracy: 0.6492 - val_loss: 0.6648 - val_accuracy: 0.5625
Epoch 7/400
191/191 [==============================] - 0s 68us/step - loss: 0.6030 - accuracy: 0.6859 - val_loss: 0.6552 - val_accuracy: 0.5938
Epoch 8/400
191/191 [==============================] - 0s 63us/step - loss: 0.5890 - accuracy: 0.7173 - val_loss: 0.6494 - val_accuracy: 0.5781
Epoch 9/400
191/191 [==============================] - 0s 63us/step - loss: 0.5794 - accuracy: 0.7277 - val_loss: 0.6450 - val_accuracy: 0.6250
Epoch 10/400
191/191 [==============================] - 0s 68us/step - loss: 0.5711 - accuracy: 0.7330 - val_loss: 0.6423 - val_accuracy: 0.6250
Epoch 11/400
191/191 [==============================] - 0s 68us/step - loss: 0.5650 - accuracy: 0.7330 - val_loss: 0.6408 - val_accuracy: 0.6406
Epoch 12/400
191/191 [==============================] - 0s 68us/step - loss: 0.5593 - accuracy: 0.7330 - val_loss: 0.6408 - val_accuracy: 0.6406
Epoch 13/400
191/191 [==============================] - 0s 68us/step - loss: 0.5559 - accuracy: 0.7382 - val_loss: 0.6398 - val_accuracy: 0.6406
Epoch 14/400
191/191 [==============================] - 0s 63us/step - loss: 0.5528 - accuracy: 0.7382 - val_loss: 0.6403 - val_accuracy: 0.6406
Epoch 15/400
191/191 [==============================] - 0s 63us/step - loss: 0.5501 - accuracy: 0.7435 - val_loss: 0.6411 - val_accuracy: 0.6406
Epoch 16/400
191/191 [==============================] - 0s 63us/step - loss: 0.5477 - accuracy: 0.7382 - val_loss: 0.6409 - val_accuracy: 0.6406
Epoch 17/400
191/191 [==============================] - 0s 63us/step - loss: 0.5464 - accuracy: 0.7382 - val_loss: 0.6396 - val_accuracy: 0.6406
Epoch 18/400
191/191 [==============================] - 0s 68us/step - loss: 0.5444 - accuracy: 0.7435 - val_loss: 0.6407 - val_accuracy: 0.6406
Epoch 19/400
191/191 [==============================] - 0s 89us/step - loss: 0.5432 - accuracy: 0.7487 - val_loss: 0.6406 - val_accuracy: 0.6406
Epoch 20/400
191/191 [==============================] - 0s 84us/step - loss: 0.5419 - accuracy: 0.7539 - val_loss: 0.6414 - val_accuracy: 0.6406
Epoch 21/400
191/191 [==============================] - 0s 68us/step - loss: 0.5412 - accuracy: 0.7539 - val_loss: 0.6434 - val_accuracy: 0.6406

Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 22/400
191/191 [==============================] - 0s 68us/step - loss: 0.5401 - accuracy: 0.7539 - val_loss: 0.6438 - val_accuracy: 0.6406
Epoch 23/400
191/191 [==============================] - 0s 63us/step - loss: 0.5393 - accuracy: 0.7592 - val_loss: 0.6438 - val_accuracy: 0.6406
Epoch 24/400
191/191 [==============================] - 0s 73us/step - loss: 0.5390 - accuracy: 0.7592 - val_loss: 0.6436 - val_accuracy: 0.6406
Epoch 25/400
191/191 [==============================] - 0s 68us/step - loss: 0.5384 - accuracy: 0.7592 - val_loss: 0.6437 - val_accuracy: 0.6406
Epoch 26/400
191/191 [==============================] - 0s 68us/step - loss: 0.5379 - accuracy: 0.7592 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 27/400
191/191 [==============================] - 0s 79us/step - loss: 0.5375 - accuracy: 0.7539 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 28/400
191/191 [==============================] - 0s 68us/step - loss: 0.5370 - accuracy: 0.7487 - val_loss: 0.6441 - val_accuracy: 0.6406
Epoch 29/400
191/191 [==============================] - 0s 68us/step - loss: 0.5367 - accuracy: 0.7539 - val_loss: 0.6440 - val_accuracy: 0.6406
Epoch 30/400
191/191 [==============================] - 0s 73us/step - loss: 0.5362 - accuracy: 0.7539 - val_loss: 0.6439 - val_accuracy: 0.6406
Epoch 31/400
191/191 [==============================] - 0s 78us/step - loss: 0.5357 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6406

Epoch 00031: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 32/400
191/191 [==============================] - 0s 78us/step - loss: 0.5352 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6406
Epoch 33/400
191/191 [==============================] - 0s 73us/step - loss: 0.5350 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 34/400
191/191 [==============================] - 0s 68us/step - loss: 0.5349 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6562
Epoch 35/400
191/191 [==============================] - 0s 84us/step - loss: 0.5346 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6406
Epoch 36/400
191/191 [==============================] - 0s 89us/step - loss: 0.5344 - accuracy: 0.7539 - val_loss: 0.6442 - val_accuracy: 0.6406
Epoch 37/400
191/191 [==============================] - 0s 84us/step - loss: 0.5342 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 38/400
191/191 [==============================] - 0s 73us/step - loss: 0.5339 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 39/400
191/191 [==============================] - 0s 73us/step - loss: 0.5337 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 40/400
191/191 [==============================] - 0s 99us/step - loss: 0.5335 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 41/400
191/191 [==============================] - 0s 89us/step - loss: 0.5333 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 42/400
191/191 [==============================] - 0s 78us/step - loss: 0.5330 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 43/400
191/191 [==============================] - 0s 73us/step - loss: 0.5328 - accuracy: 0.7539 - val_loss: 0.6448 - val_accuracy: 0.6406
Epoch 44/400
191/191 [==============================] - 0s 73us/step - loss: 0.5325 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 45/400
191/191 [==============================] - 0s 68us/step - loss: 0.5323 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 46/400
191/191 [==============================] - 0s 63us/step - loss: 0.5322 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 47/400
191/191 [==============================] - 0s 78us/step - loss: 0.5321 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 48/400
191/191 [==============================] - 0s 73us/step - loss: 0.5319 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 49/400
191/191 [==============================] - 0s 73us/step - loss: 0.5318 - accuracy: 0.7539 - val_loss: 0.6446 - val_accuracy: 0.6406
Epoch 50/400
191/191 [==============================] - 0s 68us/step - loss: 0.5317 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6406
Epoch 51/400
191/191 [==============================] - 0s 68us/step - loss: 0.5316 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 52/400
191/191 [==============================] - 0s 68us/step - loss: 0.5315 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 53/400
191/191 [==============================] - 0s 58us/step - loss: 0.5313 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 54/400
191/191 [==============================] - 0s 63us/step - loss: 0.5312 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00054: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 55/400
191/191 [==============================] - 0s 73us/step - loss: 0.5311 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 56/400
191/191 [==============================] - 0s 115us/step - loss: 0.5311 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 57/400
191/191 [==============================] - ETA: 0s - loss: 0.5277 - accuracy: 0.78 - 0s 84us/step - loss: 0.5310 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 58/400
191/191 [==============================] - 0s 73us/step - loss: 0.5310 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 59/400
191/191 [==============================] - 0s 73us/step - loss: 0.5309 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 60/400
191/191 [==============================] - 0s 73us/step - loss: 0.5308 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 61/400
191/191 [==============================] - 0s 73us/step - loss: 0.5307 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 62/400
191/191 [==============================] - 0s 73us/step - loss: 0.5307 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 63/400
191/191 [==============================] - 0s 73us/step - loss: 0.5306 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 64/400
191/191 [==============================] - 0s 84us/step - loss: 0.5306 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562

Epoch 00064: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 65/400
191/191 [==============================] - 0s 63us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 66/400
191/191 [==============================] - 0s 58us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 67/400
191/191 [==============================] - 0s 68us/step - loss: 0.5305 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 68/400
191/191 [==============================] - 0s 73us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 69/400
191/191 [==============================] - 0s 68us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6445 - val_accuracy: 0.6562
Epoch 70/400
191/191 [==============================] - 0s 94us/step - loss: 0.5304 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 71/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 72/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 73/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 74/400
191/191 [==============================] - 0s 68us/step - loss: 0.5303 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00074: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 75/400
191/191 [==============================] - 0s 94us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 76/400
191/191 [==============================] - 0s 99us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 77/400
191/191 [==============================] - 0s 78us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 78/400
191/191 [==============================] - 0s 68us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 79/400
191/191 [==============================] - 0s 78us/step - loss: 0.5302 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 80/400
191/191 [==============================] - 0s 68us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 81/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 82/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 83/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 84/400
191/191 [==============================] - 0s 52us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00084: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 85/400
191/191 [==============================] - 0s 63us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 86/400
191/191 [==============================] - 0s 68us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 87/400
191/191 [==============================] - 0s 73us/step - loss: 0.5301 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 88/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 89/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 90/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 91/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 92/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 93/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 94/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00094: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 95/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 96/400
191/191 [==============================] - 0s 68us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 97/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 98/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 99/400
191/191 [==============================] - 0s 89us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 100/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 101/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 102/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 103/400
191/191 [==============================] - 0s 58us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 104/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00104: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 105/400
191/191 [==============================] - 0s 73us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 106/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 107/400
191/191 [==============================] - 0s 63us/step - loss: 0.5300 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 108/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 109/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 110/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 111/400
191/191 [==============================] - 0s 89us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 112/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 113/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 114/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00114: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 115/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 116/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 117/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 118/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 119/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 120/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 121/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 122/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 123/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 124/400
191/191 [==============================] - 0s 105us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00124: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 125/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 126/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 127/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 128/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 129/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 130/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 131/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 132/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 133/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 134/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00134: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 135/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 136/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 137/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 138/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 139/400
191/191 [==============================] - 0s 94us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 140/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 141/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 142/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 143/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 144/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00144: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 145/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 146/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 147/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 148/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 149/400
191/191 [==============================] - 0s 105us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 150/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 151/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 152/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 153/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 154/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00154: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 155/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 156/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 157/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 158/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 159/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 160/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 161/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 162/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 163/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 164/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00164: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 165/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 166/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 167/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 168/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 169/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 170/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 171/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 172/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 173/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 174/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00174: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 175/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 176/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 177/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 178/400
191/191 [==============================] - 0s 84us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 179/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 180/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 181/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 182/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 183/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 184/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00184: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 185/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 186/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 187/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 188/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 189/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 190/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 191/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 192/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 193/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 194/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00194: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 195/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 196/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 197/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 198/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 199/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 200/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 201/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 202/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 203/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 204/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00204: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 205/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 206/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 207/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 208/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 209/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 210/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 211/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 212/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 213/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 214/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00214: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 215/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 216/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 217/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 218/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 219/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 220/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 221/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 222/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 223/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 224/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00224: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 225/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 226/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 227/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 228/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 229/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 230/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 231/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 232/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 233/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 234/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00234: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 235/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 236/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 237/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 238/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 239/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 240/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 241/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 242/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 243/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 244/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00244: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 245/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 246/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 247/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 248/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 249/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 250/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 251/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 252/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 253/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 254/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00254: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 255/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 256/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 257/400
191/191 [==============================] - 0s 99us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 258/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 259/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 260/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 261/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 262/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 263/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 264/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00264: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 265/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 266/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 267/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 268/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 269/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 270/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 271/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 272/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 273/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 274/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00274: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 275/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 276/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 277/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 278/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 279/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 280/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 281/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 282/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 283/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 284/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00284: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 285/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 286/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 287/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 288/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 289/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 290/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 291/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 292/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 293/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 294/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00294: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 295/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 296/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 297/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 298/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 299/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 300/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 301/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 302/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 303/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 304/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00304: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 305/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 306/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 307/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 308/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 309/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 310/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 311/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 312/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 313/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 314/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00314: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 315/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 316/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 317/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 318/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 319/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 320/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 321/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 322/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 323/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 324/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00324: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 325/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 326/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 327/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 328/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 329/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 330/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 331/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 332/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 333/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 334/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00334: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 335/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 336/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 337/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 338/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 339/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 340/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 341/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 342/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 343/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 344/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00344: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 345/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 346/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 347/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 348/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 349/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 350/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 351/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 352/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 353/400
191/191 [==============================] - 0s 73us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 354/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00354: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 355/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 356/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 357/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 358/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 359/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 360/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 361/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 362/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 363/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 364/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00364: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 365/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 366/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 367/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 368/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 369/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 370/400
191/191 [==============================] - 0s 47us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 371/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 372/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 373/400
191/191 [==============================] - 0s 78us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 374/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00374: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 375/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 376/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 377/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 378/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 379/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 380/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 381/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 382/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 383/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 384/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00384: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 385/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 386/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 387/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 388/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 389/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 390/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 391/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 392/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 393/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 394/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562

Epoch 00394: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 395/400
191/191 [==============================] - 0s 68us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 396/400
191/191 [==============================] - 0s 63us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 397/400
191/191 [==============================] - 0s 58us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 398/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 399/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
Epoch 400/400
191/191 [==============================] - 0s 52us/step - loss: 0.5299 - accuracy: 0.7539 - val_loss: 0.6444 - val_accuracy: 0.6562
In [239]:
# Plot the Keras training history: accuracy and loss curves for the
# training and validation sets. The stray debug `print(epochs)` from the
# original (which dumped `range(0, 400)` into the output) is removed, and
# axis labels are added so each figure stands alone.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 400)
In [240]:
# Evaluate the trained network on the held-out test split and report both
# metrics on one line.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
64/64 [==============================] - 0s 62us/step
test loss: 0.644367516040802, test accuracy: 0.65625
In [241]:
# Score the test set with the trained model and report ROC AUC on the raw
# predicted probabilities.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.49637681159420294
In [242]:
# Binarize the predicted probabilities at 0.5 and report Cohen's kappa.
# Vectorized thresholding replaces the original per-element
# `list(map(lambda i: int(i>=0.5), y_pred))`, which relied on int() of a
# size-1 array for each row of the (n, 1) prediction matrix; ravel()
# flattens it to the 1-D label vector cohen_kappa_score expects.
y_pred = (np.asarray(y_pred) >= 0.5).astype(int).ravel()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  -0.023255813953488413

KMeans

In [243]:
# Rich display of the current feature matrix (255 tracks x 6 tonal-centroid
# features, standardized judging by the value range shown below).
X
Out[243]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427
... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789

255 rows × 6 columns

In [244]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for
# k = 1..14, collected in one pass. fit() returns the estimator itself,
# so the comprehension reads the freshly fitted inertia_ directly.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[244]:
[1530.0000000000002,
 1266.8988304034983,
 1085.4171102625123,
 963.5827926636907,
 872.5239995069635,
 797.6140851961846,
 747.1323294070899,
 703.670300371115,
 664.3614627122823,
 637.5590430281768,
 607.7011770650902,
 585.4389967082509,
 558.8506960652073,
 540.5660329891642]
In [245]:
# Elbow plot of the WSS values computed above. Title and axis labels are
# added so the figure stands alone, and plt.show() suppresses the bare
# [<matplotlib.lines.Line2D ...>] repr the original cell emitted.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow method for choosing k')
plt.xlabel('Number of clusters k')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[245]:
[<matplotlib.lines.Line2D at 0x1e82fedf908>]

K=3

In [248]:
# Fit K-Means with the elbow-chosen k = 3. n_init=10 restarts guard
# against a poor centroid initialization; random_state pins the result.
kmeans_ch = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[248]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [249]:
# Cluster assignment of each row of X, as computed during fit.
kmeans_ch.labels_
Out[249]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [250]:
# Assign each row of X to its nearest final centroid.
# NOTE(review): for the same data the model was fit on, this should equal
# kmeans_ch.labels_ shown above (the Out[] arrays are identical), so the
# predict() call is likely redundant — confirm before simplifying.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[250]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [251]:
# Attach the cluster id and the target label to the feature frame for the
# cluster-vs-label comparison below.
# NOTE(review): this mutates X in place — any other name still bound to
# this DataFrame (e.g. the container entry X was taken from) now carries
# the extra columns too.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)  # list() sidesteps index alignment on assignment
In [252]:
# Re-display the feature frame, now augmented with the Cluster and chosen
# columns.
X
Out[252]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657 2 0
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150 0 0
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556 2 0
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225 0 0
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064 2 0
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760 0 0
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006 0 0
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899 2 0
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231 2 0
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941 0 0
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603 0 0
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297 2 0
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418 1 0
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511 0 0
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578 0 0
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091 1 0
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601 0 0
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744 0 0
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005 0 0
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715 0 0
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699 0 0
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938 2 0
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766 0 0
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216 0 0
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372 1 0
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177 1 0
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656 0 0
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495 0 0
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476 0 0
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427 2 0
... ... ... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486 1 1
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195 0 1
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882 1 1
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947 1 1
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571 1 1
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245 1 1
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298 0 1
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640 2 1
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759 1 1
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686 1 1
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325 2 1
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907 2 1
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388 2 1
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946 1 1
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004 1 1
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039 1 1
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120 0 1
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067 0 1
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823 0 1
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778 0 1
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072 2 1
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304 0 1
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229 2 1
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173 2 1
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670 0 1
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641 1 1
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812 1 1
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218 1 1
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656 2 1
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789 1 1

255 rows × 8 columns

In [253]:
# Cross-tabulate cluster membership against the 'chosen' label and draw a
# stacked bar chart. Title and axis labels are added so the figure stands
# alone; capturing the Axes also suppresses the bare AxesSubplot repr.
# groupby().size() yields a count column literally named 0, which is why
# the pivot selects values=0.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
ax = pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
ax.set_title('Cluster composition by chosen label')
ax.set_xlabel('Cluster')
ax.set_ylabel('Number of tracks')
Out[253]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e8304ba3c8>
In [75]:
# Render the current company's name as a level-2 Markdown section header.
from IPython.display import display, Markdown, Latex
display(Markdown(f"## {companies[2]}"))

Gramma

ANN

In [256]:
# Feature matrix for company index 2 — presumably the standardized
# per-company frame (per the _std_ suffix); verify against the cell that
# built df_n_ps_std_ch.
X = df_n_ps_std_ch[2]
In [257]:
# Binary target: whether the track was chosen (taken from the unscaled
# per-company frame).
y = df_n_ps[2]['chosen']
In [258]:
# Hold out a test split (default 25%). random_state pins the split so the
# notebook reproduces under Restart & Run All — the original call was
# unseeded, making every downstream metric run-dependent.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [259]:
# Sanity check on the split: (rows, features) of the training set.
X_train.shape
Out[259]:
(231, 6)
In [260]:
# Base estimator for the grid search. hidden_layer_sizes here is only a
# placeholder — the grid below searches over architectures anyway.
# random_state pins the weight initialization so grid-search results are
# reproducible (the original estimator was unseeded).
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30), random_state=0)
In [261]:
# Candidate values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
# Training-iteration budgets to try.
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Architectures: 1- to 3-layer stacks of 10/20/30 units.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but not searched — the batch_size entry is commented out below.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [262]:
import time
start = time.time()  # current time in seconds since the epoch (Jan 1, 1970) — reference point

np.random.seed(1234)
# Hyperparameter grid assembled from the candidate vectors above.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score with both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid=True is deprecated and removed in newer scikit-learn
# releases — drop it when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [263]:
# Run the exhaustive grid search (28 minutes per the logged output), then
# report the winning configuration, its CV accuracy, and the kappa of the
# same configuration. The printed messages are user-facing Spanish output
# and are left byte-identical.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time after model training has finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.01, 'max_iter': 10}, que permiten obtener un Accuracy de 81.82% y un Kappa del 6.27
Tiempo total: 28.04 minutos
In [84]:
# Recover the winning hyperparameters from the grid search for reuse when
# rebuilding the same architecture in Keras below.
n0 = X_train.shape[1]  # input width = number of features
# list() copies the best hidden_layer_sizes tuple into a mutable list,
# replacing the original element-by-element copy loop.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)  # single output unit for binary classification
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [85]:
# Keras functional-API input layer sized to the feature count.
input_tensor = Input(shape = (n0,))
In [86]:
# Build the hidden stack with tanh activations, wiring each Dense layer to
# the previous tensor; hidden_outputs keeps every intermediate tensor, and
# a final sigmoid unit produces the binary-classification output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation='tanh')(hidden_outputs[-1]))

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [87]:
# Wrap the tensors into a Model, and snapshot the freshly initialized
# (untrained) weights so training can later be restarted from this exact
# starting point via set_weights.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [88]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_8 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_9 (Dense)              (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
In [89]:
# Restore the snapshotted initial weights, compile with Adam at the
# grid-searched learning rate, and train. ReduceLROnPlateau halves the LR
# whenever val_accuracy fails to improve by at least 0.01 for 10 epochs.
# NOTE(review): the test split is passed as validation_data and drives the
# LR schedule, so the test metrics reported later are not fully held out.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 231 samples, validate on 78 samples
Epoch 1/1000
231/231 [==============================] - 0s 1ms/step - loss: 0.8119 - accuracy: 0.4589 - val_loss: 0.7457 - val_accuracy: 0.5385
Epoch 2/1000
231/231 [==============================] - 0s 126us/step - loss: 0.7766 - accuracy: 0.4935 - val_loss: 0.7248 - val_accuracy: 0.5513
Epoch 3/1000
231/231 [==============================] - 0s 61us/step - loss: 0.7467 - accuracy: 0.5411 - val_loss: 0.7062 - val_accuracy: 0.5385
Epoch 4/1000
231/231 [==============================] - 0s 52us/step - loss: 0.7166 - accuracy: 0.5931 - val_loss: 0.6883 - val_accuracy: 0.5513
Epoch 5/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6911 - accuracy: 0.6147 - val_loss: 0.6739 - val_accuracy: 0.5641
Epoch 6/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6673 - accuracy: 0.6234 - val_loss: 0.6607 - val_accuracy: 0.5897
Epoch 7/1000
231/231 [==============================] - 0s 78us/step - loss: 0.6460 - accuracy: 0.6364 - val_loss: 0.6480 - val_accuracy: 0.5897
Epoch 8/1000
231/231 [==============================] - 0s 56us/step - loss: 0.6258 - accuracy: 0.6623 - val_loss: 0.6371 - val_accuracy: 0.6154
Epoch 9/1000
231/231 [==============================] - 0s 74us/step - loss: 0.6079 - accuracy: 0.6883 - val_loss: 0.6269 - val_accuracy: 0.6667
Epoch 10/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5915 - accuracy: 0.7229 - val_loss: 0.6189 - val_accuracy: 0.6923
Epoch 11/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5768 - accuracy: 0.7403 - val_loss: 0.6096 - val_accuracy: 0.7051
Epoch 12/1000
231/231 [==============================] - 0s 56us/step - loss: 0.5636 - accuracy: 0.7532 - val_loss: 0.6025 - val_accuracy: 0.7308
Epoch 13/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5516 - accuracy: 0.7662 - val_loss: 0.5950 - val_accuracy: 0.7564
Epoch 14/1000
231/231 [==============================] - 0s 56us/step - loss: 0.5405 - accuracy: 0.7619 - val_loss: 0.5887 - val_accuracy: 0.7692
Epoch 15/1000
231/231 [==============================] - 0s 61us/step - loss: 0.5306 - accuracy: 0.7706 - val_loss: 0.5840 - val_accuracy: 0.7564
Epoch 16/1000
231/231 [==============================] - 0s 91us/step - loss: 0.5200 - accuracy: 0.7835 - val_loss: 0.5799 - val_accuracy: 0.7564
Epoch 17/1000
231/231 [==============================] - 0s 65us/step - loss: 0.5108 - accuracy: 0.7879 - val_loss: 0.5743 - val_accuracy: 0.7436
Epoch 18/1000
231/231 [==============================] - 0s 91us/step - loss: 0.5024 - accuracy: 0.7965 - val_loss: 0.5700 - val_accuracy: 0.7179
Epoch 19/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4944 - accuracy: 0.8139 - val_loss: 0.5660 - val_accuracy: 0.7051
Epoch 20/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4864 - accuracy: 0.8139 - val_loss: 0.5606 - val_accuracy: 0.7179
Epoch 21/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4797 - accuracy: 0.8139 - val_loss: 0.5552 - val_accuracy: 0.7179
Epoch 22/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4734 - accuracy: 0.8182 - val_loss: 0.5507 - val_accuracy: 0.7308
Epoch 23/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4674 - accuracy: 0.8225 - val_loss: 0.5486 - val_accuracy: 0.7308
Epoch 24/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4615 - accuracy: 0.8225 - val_loss: 0.5479 - val_accuracy: 0.7436

Epoch 00024: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 25/1000
231/231 [==============================] - ETA: 0s - loss: 0.4421 - accuracy: 0.84 - 0s 56us/step - loss: 0.4574 - accuracy: 0.8182 - val_loss: 0.5472 - val_accuracy: 0.7436
Epoch 26/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4552 - accuracy: 0.8182 - val_loss: 0.5465 - val_accuracy: 0.7564
Epoch 27/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4527 - accuracy: 0.8139 - val_loss: 0.5450 - val_accuracy: 0.7564
Epoch 28/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4502 - accuracy: 0.8139 - val_loss: 0.5441 - val_accuracy: 0.7564
Epoch 29/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4479 - accuracy: 0.8182 - val_loss: 0.5429 - val_accuracy: 0.7564
Epoch 30/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4459 - accuracy: 0.8182 - val_loss: 0.5424 - val_accuracy: 0.7436
Epoch 31/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4439 - accuracy: 0.8182 - val_loss: 0.5413 - val_accuracy: 0.7436
Epoch 32/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4419 - accuracy: 0.8182 - val_loss: 0.5407 - val_accuracy: 0.7436
Epoch 33/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4402 - accuracy: 0.8139 - val_loss: 0.5400 - val_accuracy: 0.7436
Epoch 34/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4385 - accuracy: 0.8139 - val_loss: 0.5398 - val_accuracy: 0.7436

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 35/1000
231/231 [==============================] - 0s 52us/step - loss: 0.4370 - accuracy: 0.8139 - val_loss: 0.5392 - val_accuracy: 0.7436
Epoch 36/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4361 - accuracy: 0.8182 - val_loss: 0.5391 - val_accuracy: 0.7436
Epoch 37/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4353 - accuracy: 0.8182 - val_loss: 0.5390 - val_accuracy: 0.7436
Epoch 38/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4346 - accuracy: 0.8182 - val_loss: 0.5384 - val_accuracy: 0.7436
Epoch 39/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4337 - accuracy: 0.8182 - val_loss: 0.5387 - val_accuracy: 0.7436
Epoch 40/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4329 - accuracy: 0.8182 - val_loss: 0.5382 - val_accuracy: 0.7436
Epoch 41/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4320 - accuracy: 0.8182 - val_loss: 0.5377 - val_accuracy: 0.7436
Epoch 42/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4311 - accuracy: 0.8139 - val_loss: 0.5370 - val_accuracy: 0.7436
Epoch 43/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4304 - accuracy: 0.8095 - val_loss: 0.5363 - val_accuracy: 0.7436
Epoch 44/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4298 - accuracy: 0.8095 - val_loss: 0.5358 - val_accuracy: 0.7436

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 45/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4291 - accuracy: 0.8139 - val_loss: 0.5355 - val_accuracy: 0.7436
Epoch 46/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4286 - accuracy: 0.8139 - val_loss: 0.5353 - val_accuracy: 0.7564
Epoch 47/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4282 - accuracy: 0.8182 - val_loss: 0.5353 - val_accuracy: 0.7436
Epoch 48/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4279 - accuracy: 0.8182 - val_loss: 0.5352 - val_accuracy: 0.7564
Epoch 49/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4275 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 50/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4271 - accuracy: 0.8182 - val_loss: 0.5351 - val_accuracy: 0.7564
Epoch 51/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4267 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 52/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4265 - accuracy: 0.8182 - val_loss: 0.5350 - val_accuracy: 0.7436
Epoch 53/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4261 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 54/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4258 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436

Epoch 00054: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 55/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4255 - accuracy: 0.8182 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 56/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4253 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 57/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4251 - accuracy: 0.8182 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 58/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4250 - accuracy: 0.8182 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 59/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4248 - accuracy: 0.8182 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 60/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4246 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 61/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4244 - accuracy: 0.8182 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 62/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4243 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 63/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4241 - accuracy: 0.8139 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 64/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4240 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436

Epoch 00064: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 65/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4238 - accuracy: 0.8182 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 66/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4237 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 67/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4236 - accuracy: 0.8139 - val_loss: 0.5348 - val_accuracy: 0.7436
Epoch 68/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4235 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 69/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4235 - accuracy: 0.8139 - val_loss: 0.5347 - val_accuracy: 0.7436
Epoch 70/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4234 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 71/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4233 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 72/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4232 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 73/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4232 - accuracy: 0.8139 - val_loss: 0.5346 - val_accuracy: 0.7436
Epoch 74/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4231 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436

Epoch 00074: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 75/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4230 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 76/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4230 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 77/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4229 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 78/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4229 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 79/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4228 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 80/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4228 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 81/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 82/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5345 - val_accuracy: 0.7436
Epoch 83/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4227 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 84/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4226 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436

Epoch 00084: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 85/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4226 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 86/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 87/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 88/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 89/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5344 - val_accuracy: 0.7436
Epoch 90/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4225 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 91/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 92/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 93/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 94/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436

Epoch 00094: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 95/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4224 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 96/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 97/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 98/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 99/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 100/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 101/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 102/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 103/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 104/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436

Epoch 00104: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 105/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4223 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 106/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 107/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 108/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 109/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 110/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 111/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 112/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 113/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5343 - val_accuracy: 0.7436
Epoch 114/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00114: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 115/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 116/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 117/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 118/1000
231/231 [==============================] - 0s 61us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 119/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 120/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 121/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 122/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 123/1000
231/231 [==============================] - 0s 65us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 124/1000
231/231 [==============================] - 0s 56us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00124: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 125/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 126/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 127/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 128/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 129/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 130/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 131/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 132/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 133/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 134/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00134: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 135/1000
231/231 [==============================] - 0s 134us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 136/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 137/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 138/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 139/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 140/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 141/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 142/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 143/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 144/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00144: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 145/1000
231/231 [==============================] - 0s 1ms/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 146/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 147/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 148/1000
231/231 [==============================] - 0s 114us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 149/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 150/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 151/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 152/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 153/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4222 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 154/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00154: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 155/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 156/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 157/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 158/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 159/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 160/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 161/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 162/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 163/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 164/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00164: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 165/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 166/1000
231/231 [==============================] - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 167/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 168/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 169/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 170/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 171/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 172/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 173/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 174/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00174: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 175/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 176/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 177/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 178/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 179/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 180/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 181/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 182/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 183/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 184/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00184: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 185/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 186/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 187/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 188/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 189/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 190/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 191/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 192/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 193/1000
231/231 [==============================] - ETA: 0s - loss: 0.3107 - accuracy: 0.90 - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 194/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00194: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 195/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 196/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 197/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 198/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 199/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 200/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 201/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 202/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 203/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 204/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00204: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 205/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 206/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 207/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 208/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 209/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 210/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 211/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 212/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 213/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 214/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00214: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 215/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 216/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 217/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 218/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 219/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 220/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 221/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 222/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 223/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 224/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00224: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 225/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 226/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 227/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 228/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 229/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 230/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 231/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 232/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 233/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 234/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00234: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 235/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 236/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 237/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 238/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 239/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 240/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 241/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 242/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 243/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 244/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00244: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 245/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 246/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 247/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 248/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 249/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 250/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 251/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 252/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 253/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 254/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00254: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 255/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 256/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 257/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 258/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 259/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 260/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 261/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 262/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 263/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 264/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00264: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 265/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 266/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 267/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 268/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 269/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 270/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 271/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 272/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 273/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 274/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00274: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 275/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 276/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 277/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 278/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 279/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 280/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 281/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 282/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 283/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 284/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00284: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 285/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 286/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 287/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 288/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 289/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 290/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 291/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 292/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 293/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 294/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00294: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 295/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 296/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 297/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 298/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 299/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 300/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 301/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 302/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 303/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 304/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00304: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 305/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 306/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 307/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 308/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 309/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 310/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 311/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 312/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 313/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 314/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00314: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 315/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 316/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 317/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 318/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 319/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 320/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 321/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 322/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 323/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 324/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00324: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 325/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 326/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 327/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 328/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 329/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 330/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 331/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 332/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 333/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 334/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00334: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 335/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 336/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 337/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 338/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 339/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 340/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 341/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 342/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 343/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 344/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00344: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 345/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 346/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 347/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 348/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 349/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 350/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 351/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 352/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 353/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 354/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00354: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 355/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 356/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 357/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 358/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 359/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 360/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 361/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 362/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 363/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 364/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00364: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 365/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 366/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 367/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 368/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 369/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 370/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 371/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 372/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 373/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 374/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00374: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 375/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 376/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 377/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 378/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 379/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 380/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 381/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 382/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 383/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 384/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00384: ReduceLROnPlateau reducing learning rate to 7.275957959772868e-15.
Epoch 385/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 386/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 387/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 388/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 389/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 390/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 391/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 392/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 393/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 394/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00394: ReduceLROnPlateau reducing learning rate to 3.637978979886434e-15.
Epoch 395/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 396/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 397/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 398/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 399/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 400/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 401/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 402/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 403/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 404/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00404: ReduceLROnPlateau reducing learning rate to 1.818989489943217e-15.
Epoch 405/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 406/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 407/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 408/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 409/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 410/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 411/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 412/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 413/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 414/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00414: ReduceLROnPlateau reducing learning rate to 9.094947449716085e-16.
Epoch 415/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 416/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 417/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 418/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 419/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 420/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 421/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 422/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 423/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 424/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00424: ReduceLROnPlateau reducing learning rate to 4.547473724858043e-16.
Epoch 425/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 426/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 427/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 428/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 429/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 430/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 431/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 432/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 433/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 434/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00434: ReduceLROnPlateau reducing learning rate to 2.2737368624290214e-16.
Epoch 435/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 436/1000
231/231 [==============================] - ETA: 0s - loss: 0.6507 - accuracy: 0.62 - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 437/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 438/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 439/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 440/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 441/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 442/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 443/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 444/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00444: ReduceLROnPlateau reducing learning rate to 1.1368684312145107e-16.
Epoch 445/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 446/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 447/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 448/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 449/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 450/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 451/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 452/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 453/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 454/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00454: ReduceLROnPlateau reducing learning rate to 5.684342156072553e-17.
Epoch 455/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 456/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 457/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 458/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 459/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 460/1000
231/231 [==============================] - 0s 203us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 461/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 462/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 463/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 464/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00464: ReduceLROnPlateau reducing learning rate to 2.842171078036277e-17.
Epoch 465/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 466/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 467/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 468/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 469/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 470/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 471/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 472/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 473/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 474/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00474: ReduceLROnPlateau reducing learning rate to 1.4210855390181384e-17.
Epoch 475/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 476/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 477/1000
231/231 [==============================] - 0s 69us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 478/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 479/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 480/1000
231/231 [==============================] - ETA: 0s - loss: 0.4785 - accuracy: 0.78 - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 481/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 482/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 483/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 484/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00484: ReduceLROnPlateau reducing learning rate to 7.105427695090692e-18.
Epoch 485/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 486/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 487/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 488/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 489/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 490/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 491/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 492/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 493/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 494/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00494: ReduceLROnPlateau reducing learning rate to 3.552713847545346e-18.
Epoch 495/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 496/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 497/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 498/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 499/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 500/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 501/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 502/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 503/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 504/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00504: ReduceLROnPlateau reducing learning rate to 1.776356923772673e-18.
Epoch 505/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 506/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 507/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 508/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 509/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 510/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 511/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 512/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 513/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 514/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00514: ReduceLROnPlateau reducing learning rate to 8.881784618863365e-19.
Epoch 515/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 516/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 517/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 518/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 519/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 520/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 521/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 522/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 523/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 524/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00524: ReduceLROnPlateau reducing learning rate to 4.440892309431682e-19.
Epoch 525/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 526/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 527/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 528/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 529/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 530/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 531/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 532/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 533/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 534/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00534: ReduceLROnPlateau reducing learning rate to 2.220446154715841e-19.
Epoch 535/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 536/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 537/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 538/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 539/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 540/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 541/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 542/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 543/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 544/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00544: ReduceLROnPlateau reducing learning rate to 1.1102230773579206e-19.
Epoch 545/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 546/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 547/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 548/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 549/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 550/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 551/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 552/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 553/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 554/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00554: ReduceLROnPlateau reducing learning rate to 5.551115386789603e-20.
Epoch 555/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 556/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 557/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 558/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 559/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 560/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 561/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 562/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 563/1000
231/231 [==============================] - 0s 125us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 564/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00564: ReduceLROnPlateau reducing learning rate to 2.7755576933948015e-20.
Epoch 565/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 566/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 567/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 568/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 569/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 570/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 571/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 572/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 573/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 574/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00574: ReduceLROnPlateau reducing learning rate to 1.3877788466974007e-20.
Epoch 575/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 576/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 577/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 578/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 579/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 580/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 581/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 582/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 583/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 584/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00584: ReduceLROnPlateau reducing learning rate to 6.938894233487004e-21.
Epoch 585/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 586/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 587/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 588/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 589/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 590/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 591/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 592/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 593/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 594/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00594: ReduceLROnPlateau reducing learning rate to 3.469447116743502e-21.
Epoch 595/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 596/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 597/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 598/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 599/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 600/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 601/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 602/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 603/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 604/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00604: ReduceLROnPlateau reducing learning rate to 1.734723558371751e-21.
Epoch 605/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 606/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 607/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 608/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 609/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 610/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 611/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 612/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 613/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 614/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00614: ReduceLROnPlateau reducing learning rate to 8.673617791858755e-22.
Epoch 615/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 616/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 617/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 618/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 619/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 620/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 621/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 622/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 623/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 624/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00624: ReduceLROnPlateau reducing learning rate to 4.336808895929377e-22.
Epoch 625/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 626/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 627/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 628/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 629/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 630/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 631/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 632/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 633/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 634/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00634: ReduceLROnPlateau reducing learning rate to 2.1684044479646887e-22.
Epoch 635/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 636/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 637/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 638/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 639/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 640/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 641/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 642/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 643/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 644/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00644: ReduceLROnPlateau reducing learning rate to 1.0842022239823443e-22.
Epoch 645/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 646/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 647/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 648/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 649/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 650/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 651/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 652/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 653/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 654/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00654: ReduceLROnPlateau reducing learning rate to 5.421011119911722e-23.
Epoch 655/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 656/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 657/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 658/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 659/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 660/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 661/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 662/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 663/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 664/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00664: ReduceLROnPlateau reducing learning rate to 2.710505559955861e-23.
Epoch 665/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 666/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 667/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 668/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 669/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 670/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 671/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 672/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 673/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 674/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00674: ReduceLROnPlateau reducing learning rate to 1.3552527799779304e-23.
Epoch 675/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 676/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 677/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 678/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 679/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 680/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 681/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 682/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 683/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 684/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00684: ReduceLROnPlateau reducing learning rate to 6.776263899889652e-24.
Epoch 685/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 686/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 687/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 688/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 689/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 690/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 691/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 692/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 693/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 694/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00694: ReduceLROnPlateau reducing learning rate to 3.388131949944826e-24.
Epoch 695/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 696/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 697/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 698/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 699/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 700/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 701/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 702/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 703/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 704/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00704: ReduceLROnPlateau reducing learning rate to 1.694065974972413e-24.
Epoch 705/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 706/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 707/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 708/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 709/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 710/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 711/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 712/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 713/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 714/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00714: ReduceLROnPlateau reducing learning rate to 8.470329874862065e-25.
Epoch 715/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 716/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 717/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 718/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 719/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 720/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 721/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 722/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 723/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 724/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00724: ReduceLROnPlateau reducing learning rate to 4.2351649374310325e-25.
Epoch 725/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 726/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 727/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 728/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 729/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 730/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 731/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 732/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 733/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 734/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00734: ReduceLROnPlateau reducing learning rate to 2.1175824687155163e-25.
Epoch 735/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 736/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 737/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 738/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 739/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 740/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 741/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 742/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 743/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 744/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00744: ReduceLROnPlateau reducing learning rate to 1.0587912343577581e-25.
Epoch 745/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 746/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 747/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 748/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 749/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 750/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 751/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 752/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 753/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 754/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00754: ReduceLROnPlateau reducing learning rate to 5.293956171788791e-26.
Epoch 755/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 756/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 757/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 758/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 759/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 760/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 761/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 762/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 763/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 764/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00764: ReduceLROnPlateau reducing learning rate to 2.6469780858943953e-26.
Epoch 765/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 766/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 767/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 768/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 769/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 770/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 771/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 772/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 773/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 774/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00774: ReduceLROnPlateau reducing learning rate to 1.3234890429471977e-26.
Epoch 775/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 776/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 777/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 778/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 779/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 780/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 781/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 782/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 783/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 784/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00784: ReduceLROnPlateau reducing learning rate to 6.617445214735988e-27.
Epoch 785/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 786/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 787/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 788/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 789/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 790/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 791/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 792/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 793/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 794/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00794: ReduceLROnPlateau reducing learning rate to 3.308722607367994e-27.
Epoch 795/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 796/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 797/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 798/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 799/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 800/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 801/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 802/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 803/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 804/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00804: ReduceLROnPlateau reducing learning rate to 1.654361303683997e-27.
Epoch 805/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 806/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 807/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 808/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 809/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 810/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 811/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 812/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 813/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 814/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00814: ReduceLROnPlateau reducing learning rate to 8.271806518419985e-28.
Epoch 815/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 816/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 817/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 818/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 819/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 820/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 821/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 822/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 823/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 824/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00824: ReduceLROnPlateau reducing learning rate to 4.135903259209993e-28.
Epoch 825/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 826/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 827/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 828/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 829/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 830/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 831/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 832/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 833/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 834/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00834: ReduceLROnPlateau reducing learning rate to 2.0679516296049964e-28.
Epoch 835/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 836/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 837/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 838/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 839/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 840/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 841/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 842/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 843/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 844/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00844: ReduceLROnPlateau reducing learning rate to 1.0339758148024982e-28.
Epoch 845/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 846/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 847/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 848/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 849/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 850/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 851/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 852/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 853/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 854/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00854: ReduceLROnPlateau reducing learning rate to 5.169879074012491e-29.
Epoch 855/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 856/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 857/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 858/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 859/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 860/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 861/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 862/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 863/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 864/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00864: ReduceLROnPlateau reducing learning rate to 2.5849395370062454e-29.
Epoch 865/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 866/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 867/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 868/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 869/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 870/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 871/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 872/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 873/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 874/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00874: ReduceLROnPlateau reducing learning rate to 1.2924697685031227e-29.
Epoch 875/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 876/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 877/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 878/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 879/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 880/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 881/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 882/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 883/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 884/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00884: ReduceLROnPlateau reducing learning rate to 6.462348842515614e-30.
Epoch 885/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 886/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 887/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 888/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 889/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 890/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 891/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 892/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 893/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 894/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00894: ReduceLROnPlateau reducing learning rate to 3.231174421257807e-30.
Epoch 895/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 896/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 897/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 898/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 899/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 900/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 901/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 902/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 903/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 904/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00904: ReduceLROnPlateau reducing learning rate to 1.6155872106289034e-30.
Epoch 905/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 906/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 907/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 908/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 909/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 910/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 911/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 912/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 913/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 914/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00914: ReduceLROnPlateau reducing learning rate to 8.077936053144517e-31.
Epoch 915/1000
231/231 [==============================] - 0s 100us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 916/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 917/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 918/1000
231/231 [==============================] - 0s 130us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 919/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 920/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 921/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 922/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 923/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 924/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00924: ReduceLROnPlateau reducing learning rate to 4.0389680265722585e-31.
Epoch 925/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 926/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 927/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 928/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 929/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 930/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 931/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 932/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 933/1000
231/231 [==============================] - 0s 121us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 934/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00934: ReduceLROnPlateau reducing learning rate to 2.0194840132861292e-31.
Epoch 935/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 936/1000
231/231 [==============================] - 0s 143us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 937/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 938/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 939/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 940/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 941/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 942/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 943/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 944/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00944: ReduceLROnPlateau reducing learning rate to 1.0097420066430646e-31.
Epoch 945/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 946/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 947/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 948/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 949/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 950/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 951/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 952/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 953/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 954/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00954: ReduceLROnPlateau reducing learning rate to 5.048710033215323e-32.
Epoch 955/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 956/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 957/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 958/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 959/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 960/1000
231/231 [==============================] - 0s 108us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 961/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 962/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 963/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 964/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00964: ReduceLROnPlateau reducing learning rate to 2.5243550166076616e-32.
Epoch 965/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 966/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 967/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 968/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 969/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 970/1000
231/231 [==============================] - 0s 117us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 971/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 972/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 973/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 974/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00974: ReduceLROnPlateau reducing learning rate to 1.2621775083038308e-32.
Epoch 975/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 976/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 977/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 978/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 979/1000
231/231 [==============================] - 0s 78us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 980/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 981/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 982/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 983/1000
231/231 [==============================] - 0s 112us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 984/1000
231/231 [==============================] - 0s 104us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00984: ReduceLROnPlateau reducing learning rate to 6.310887541519154e-33.
Epoch 985/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 986/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 987/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 988/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 989/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 990/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 991/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 992/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 993/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 994/1000
231/231 [==============================] - 0s 74us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436

Epoch 00994: ReduceLROnPlateau reducing learning rate to 3.155443770759577e-33.
Epoch 995/1000
231/231 [==============================] - 0s 95us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 996/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 997/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 998/1000
231/231 [==============================] - 0s 91us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 999/1000
231/231 [==============================] - 0s 87us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
Epoch 1000/1000
231/231 [==============================] - 0s 82us/step - loss: 0.4221 - accuracy: 0.8139 - val_loss: 0.5342 - val_accuracy: 0.7436
In [57]:
# Learning curves from the Keras History object returned by model.fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per recorded epoch.
# NOTE(review): the debug print removed here showed range(0, 2000) even though
# this fit ran 1000 epochs — history may accumulate over two fit() calls; confirm.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [90]:
# Final held-out evaluation of the trained network on the test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
78/78 [==============================] - 0s 77us/step
test loss: 0.5342327150014731, test accuracy: 0.7435897588729858
In [91]:
# Score the network's raw probability outputs with threshold-free ROC AUC.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.655664585191793
In [92]:
# Binarise the probabilities at 0.5, then measure chance-corrected agreement
# with Cohen's kappa.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.07253269916765748

KMeans

In [93]:
X
Out[93]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.784459 0.109342 0.681608 1.151904 -0.707724 0.736365 0.241404 -0.461605 1.084621 0.123497 -0.150398 1.784532 0.824544
1 0.213444 0.453851 0.215705 0.987439 -1.851748 -0.001814 -0.218203 0.548263 -0.521851 1.253720 0.882518 -0.892913 0.218184
2 1.478029 0.664210 0.484232 0.450284 -0.427587 -0.448079 -0.195272 -0.610679 -0.675633 0.759098 -0.107303 -0.440790 -1.398093
3 -0.846386 -0.742706 -0.734786 -0.199585 -0.328948 -0.230911 0.620910 0.911236 1.274407 0.150882 -0.603865 -0.919849 0.386069
4 -0.952033 -0.794706 -1.141199 -0.070979 0.454453 0.544600 1.427005 1.918539 1.204102 0.076961 -0.328712 -1.051776 -0.151007
5 -1.244473 2.250723 2.321007 0.549219 1.971653 -1.427849 0.392314 -1.015093 0.157566 0.486970 -0.455070 0.229936 0.185742
6 -0.128652 0.958605 1.191477 0.705974 0.793937 -0.709130 -0.221572 0.922219 0.500475 -0.571099 0.521526 0.567361 -0.486761
7 -1.148662 0.562286 0.636059 0.222732 0.741470 0.009108 -0.949365 -0.486631 -0.519627 -0.739600 -0.415884 -0.026850 0.290685
8 -0.440190 0.461355 -0.016542 -0.158636 0.985626 -0.417310 0.182134 0.290631 0.379949 0.069159 1.080013 0.220566 -0.042505
9 -0.695730 0.398404 1.069978 -0.124019 0.736597 -0.912452 0.673156 0.604840 0.175505 0.496158 0.541788 0.649837 -0.680054
10 -0.006662 0.163832 1.373872 -0.095120 1.621755 1.048509 0.997122 0.721763 0.660834 -1.076324 0.925997 -0.147393 -0.420465
11 -0.771763 -0.484525 -0.874411 0.647747 -1.241650 0.190918 0.457290 0.915208 1.999689 1.879761 0.491598 -0.164372 -0.560754
12 0.140770 1.869847 -1.926303 -2.491201 -2.679759 -1.527330 -0.299345 -0.550878 0.702947 0.143961 0.034796 -0.379551 -0.422354
13 -1.952477 -0.949813 0.063314 1.188657 1.059601 1.221319 0.070346 2.284107 2.889527 2.012105 1.053494 -0.178905 -2.004333
14 -0.895529 0.398850 -0.469782 1.216393 0.657294 -0.550619 -0.854637 -0.815454 1.929689 1.499328 -0.096775 -0.174183 -1.119396
15 -1.161372 1.475106 1.486594 0.127516 0.213940 0.587080 -0.789652 0.130203 1.199389 1.458358 0.404206 0.754289 -0.784214
16 -0.476792 2.179287 0.101035 -1.393755 -0.740834 0.589666 0.873850 0.630539 0.535702 0.387326 -0.979677 0.259755 0.313358
17 -0.089088 -0.841832 0.674093 -0.842623 0.904577 -1.476862 1.853427 -1.108621 0.720923 0.383320 -1.842030 1.712321 -1.612726
18 -1.772732 0.488101 0.057829 0.041074 0.732429 1.052187 0.279830 -0.350521 -0.476338 -0.833438 0.184849 -0.055428 0.627307
19 -0.640351 0.068493 0.619966 -0.599171 0.860806 -0.385120 1.955087 -1.014740 1.224043 1.450896 -2.604448 2.187869 -0.464774
20 0.590240 0.699904 -0.097902 0.127319 -0.882999 0.319144 -0.146142 -0.540616 0.300593 0.688863 0.314647 0.709538 0.572811
21 0.500240 0.875222 -0.833826 0.377484 0.023480 1.321472 1.094037 0.734507 0.141947 0.214524 0.508556 -0.265911 -0.372316
22 -0.076653 0.518030 0.003390 0.452969 -0.218736 0.115409 0.332618 0.611098 0.211893 -0.206368 0.358363 0.614915 0.518172
23 0.010763 -0.352873 -0.460051 0.423968 -0.228393 -0.040296 -0.740869 -0.810034 -1.379366 -0.179024 0.147810 -0.224826 0.615011
24 0.874600 0.173728 -1.041125 0.845285 1.139221 0.264458 -0.378878 0.430226 -0.568469 -1.237333 0.032074 0.812111 0.431460
25 0.200637 0.337376 0.022126 1.189135 -0.210135 -1.195492 0.067874 1.349711 -0.534365 -0.132754 0.055132 0.239009 -0.275633
26 0.362627 0.159292 -1.211688 -0.555502 0.107540 0.797027 -0.246321 -1.113565 -1.373054 -2.369077 -0.539483 1.032005 1.637730
27 -0.504648 -0.561515 -2.173809 -1.525691 -0.810132 -0.617474 0.441103 1.146056 1.464488 -1.111032 -0.742722 0.034623 0.200147
28 -0.339646 -2.140319 -1.409226 -0.207553 -1.216547 -1.135346 -0.831817 1.136334 -0.187159 1.388841 0.282573 -0.807850 -0.371992
29 -1.279089 1.555887 0.890503 2.134195 0.337580 -0.037382 -2.046955 -2.888113 1.329665 1.436687 -1.576201 0.485256 1.429246
... ... ... ... ... ... ... ... ... ... ... ... ... ...
279 -1.132789 -0.931481 -0.350024 -0.228575 -1.201208 -1.044342 0.532403 1.667036 1.383485 -0.967474 -0.286625 -1.920618 -0.797190
280 -0.375948 0.058369 0.489068 0.862825 -1.876102 -0.195043 -1.163295 0.716190 0.384576 -0.168340 1.542126 -0.769460 0.456686
281 0.412883 -1.703432 -0.514845 -1.382818 -0.713972 -0.476089 1.471006 0.826485 0.508608 -1.311788 -2.010635 -1.122699 -0.848851
282 -0.152329 -2.012108 -0.217355 -1.122627 -0.851075 0.634424 1.711007 0.281350 -0.565156 -1.667195 -1.942452 -1.586592 -0.485128
283 0.348443 -2.381428 1.267515 -1.713290 0.161262 -1.589515 1.383857 -0.218429 0.412550 0.382171 -1.073499 -1.745128 -3.227845
284 -0.895866 1.001673 1.059356 0.166883 -0.710729 0.466737 -0.857566 -0.158962 0.004241 0.391823 0.576231 0.329506 -1.331272
285 0.417102 1.957515 2.350604 -1.125042 -2.206390 -0.674814 -1.217854 0.372865 0.840465 -0.472910 0.310419 1.379494 1.128412
286 -0.900897 -0.289100 0.433265 -0.281829 -0.379951 1.272236 0.313949 -0.261980 -0.053111 0.473694 0.493962 -0.263293 -0.657598
287 -0.002448 -0.853612 0.441903 0.406478 -0.823085 0.590185 -0.292046 -0.079952 -0.422138 0.579522 -0.620415 -0.298847 0.620798
288 -0.528092 -1.022206 -0.348679 0.093718 -1.642833 -2.355166 -0.992806 -0.143423 0.270521 0.838321 0.843686 0.469574 -0.325121
289 -0.387248 -1.305014 -0.365540 0.202745 -0.906016 -1.785190 -1.377992 -0.544742 -0.670979 -0.785606 0.505505 0.502505 -0.151297
290 0.425324 -2.583173 -2.181080 -1.262030 -0.179265 0.176164 1.763096 0.436737 -2.048534 -1.014266 1.298221 0.401742 -1.080608
291 -0.572282 -0.375532 -2.067885 -0.361247 -0.315065 -0.671820 -0.183865 -0.517694 -0.802956 -0.951809 0.282442 0.208005 -0.271252
292 -0.084382 -1.508230 -0.105496 -1.930204 -1.529664 -0.795467 1.273717 -1.858542 -0.446361 -0.239346 0.154464 -0.114937 -1.831603
293 -1.172703 0.783209 -1.141589 -0.982768 -0.513216 0.655437 1.962510 0.628858 1.130028 1.104741 1.539591 1.547843 -0.011302
294 -1.293038 0.838303 -1.049071 -0.708031 -0.779995 0.868108 1.621994 0.725495 1.173585 1.424395 1.751950 1.352876 0.339922
295 0.809878 -0.351504 -2.231752 -0.556719 -1.430264 -0.357918 -0.727837 1.110363 1.684188 0.429768 0.560061 0.371789 -1.110030
296 0.305449 -0.148924 -0.727054 -0.126830 0.467272 0.420013 1.212777 0.954055 -0.988419 -0.423614 -0.047239 0.058678 -0.031517
297 1.212224 1.916789 0.287969 -0.073842 0.289112 0.943764 -0.395404 -0.380613 0.262567 0.759137 0.277177 0.493951 1.026995
298 -0.121307 0.217217 0.030920 -0.201270 -0.752001 -0.276070 0.835502 -0.363704 -0.641199 0.283313 0.060013 0.013280 0.477857
299 -0.622824 -0.595352 0.256282 -0.111551 0.023990 1.221659 1.572998 -0.263983 -0.707828 0.707801 0.306249 1.046476 0.214979
300 -0.667480 -0.808638 0.730781 0.054549 0.191421 0.279885 0.088177 0.823617 0.604299 0.640274 -0.360151 1.298688 0.494875
301 0.928382 -2.375767 -0.427528 -0.852350 -1.137004 1.584181 -1.700220 -2.060965 -1.326622 0.451948 0.593212 0.152418 -0.128797
302 -0.483888 0.443846 0.129714 0.199624 -0.106985 0.817702 -0.072817 -1.163918 0.545762 -0.141320 0.041767 -0.402181 0.061897
303 0.715769 0.780533 1.467750 -0.595580 -1.178484 4.014345 -0.112339 -1.611382 -0.295511 0.032462 1.836607 -4.315898 -1.084441
304 0.041466 -0.470275 0.234655 0.109532 -0.518455 -0.977540 -0.613498 -1.108545 0.500653 -0.214143 -0.033265 -0.541673 0.714974
305 0.818747 0.495675 1.005686 0.967334 0.505171 -0.579478 -0.847677 1.574323 1.544556 0.412556 -0.972040 0.290457 0.289042
306 1.062928 -1.149587 1.951840 -0.065775 0.546680 0.994901 -1.817826 2.109742 0.264443 0.505287 -0.757462 0.578677 0.222503
307 -0.701621 -0.049803 -0.719153 -0.048069 1.223251 1.913492 0.887449 0.038186 0.546172 -0.568362 -1.091833 -0.250367 0.831399
308 -0.079821 0.796085 -0.215763 -1.396439 -0.133350 0.582037 2.442796 0.743250 -1.182753 -0.723658 -0.879934 -2.498899 -1.532262

309 rows × 13 columns

In [94]:
# Elbow-method scan: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the loop collapses to one
# comprehension; random_state=0 pins the centroid initialisation.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[94]:
[4016.9999999999995,
 3599.8293806720085,
 3349.349727264702,
 3159.6428991584926,
 3002.905895600155,
 2909.3645052598604,
 2804.695882234172,
 2720.275460001156,
 2622.695881163609,
 2543.6022931320426,
 2484.176525692807,
 2436.6681239209124,
 2402.363548718592,
 2338.201438573343]
In [95]:
# Elbow plot of the WSS values computed above: pick the k where the
# curve's slope flattens.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow method for choosing k')
plt.xlabel('Number of clusters k')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[95]:
[<matplotlib.lines.Line2D at 0x1e82b4d8dd8>]

K=2

In [96]:
# Fit the final K-Means model with the k chosen from the elbow plot (k=2).
# n_init=10 restarts plus random_state=0 make the clustering deterministic.
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[96]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [97]:
kmeans_ch.labels_
Out[97]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0,
       1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,
       1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,
       0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
       1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0])
In [98]:
# Assign every row to its nearest centroid. For the same data the model was
# fitted on this matches kmeans_ch.labels_ (the two output arrays agree).
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[98]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0,
       1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1,
       1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,
       0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
       1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0])
In [99]:
# NOTE(review): this mutates X in place, appending two non-feature columns;
# any later cell that re-fits on X must drop 'Cluster'/'chosen' first.
X.loc[:,'Cluster'] = clusters_ch
# Ground-truth label alongside the cluster id, for the comparison below.
X.loc[:,'chosen'] = list(y)
In [100]:
X
Out[100]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.784459 0.109342 0.681608 1.151904 -0.707724 0.736365 0.241404 -0.461605 1.084621 0.123497 -0.150398 1.784532 0.824544 1 0
1 0.213444 0.453851 0.215705 0.987439 -1.851748 -0.001814 -0.218203 0.548263 -0.521851 1.253720 0.882518 -0.892913 0.218184 1 0
2 1.478029 0.664210 0.484232 0.450284 -0.427587 -0.448079 -0.195272 -0.610679 -0.675633 0.759098 -0.107303 -0.440790 -1.398093 0 0
3 -0.846386 -0.742706 -0.734786 -0.199585 -0.328948 -0.230911 0.620910 0.911236 1.274407 0.150882 -0.603865 -0.919849 0.386069 0 0
4 -0.952033 -0.794706 -1.141199 -0.070979 0.454453 0.544600 1.427005 1.918539 1.204102 0.076961 -0.328712 -1.051776 -0.151007 0 0
5 -1.244473 2.250723 2.321007 0.549219 1.971653 -1.427849 0.392314 -1.015093 0.157566 0.486970 -0.455070 0.229936 0.185742 1 0
6 -0.128652 0.958605 1.191477 0.705974 0.793937 -0.709130 -0.221572 0.922219 0.500475 -0.571099 0.521526 0.567361 -0.486761 1 0
7 -1.148662 0.562286 0.636059 0.222732 0.741470 0.009108 -0.949365 -0.486631 -0.519627 -0.739600 -0.415884 -0.026850 0.290685 1 0
8 -0.440190 0.461355 -0.016542 -0.158636 0.985626 -0.417310 0.182134 0.290631 0.379949 0.069159 1.080013 0.220566 -0.042505 1 0
9 -0.695730 0.398404 1.069978 -0.124019 0.736597 -0.912452 0.673156 0.604840 0.175505 0.496158 0.541788 0.649837 -0.680054 1 0
10 -0.006662 0.163832 1.373872 -0.095120 1.621755 1.048509 0.997122 0.721763 0.660834 -1.076324 0.925997 -0.147393 -0.420465 1 0
11 -0.771763 -0.484525 -0.874411 0.647747 -1.241650 0.190918 0.457290 0.915208 1.999689 1.879761 0.491598 -0.164372 -0.560754 1 0
12 0.140770 1.869847 -1.926303 -2.491201 -2.679759 -1.527330 -0.299345 -0.550878 0.702947 0.143961 0.034796 -0.379551 -0.422354 0 0
13 -1.952477 -0.949813 0.063314 1.188657 1.059601 1.221319 0.070346 2.284107 2.889527 2.012105 1.053494 -0.178905 -2.004333 1 0
14 -0.895529 0.398850 -0.469782 1.216393 0.657294 -0.550619 -0.854637 -0.815454 1.929689 1.499328 -0.096775 -0.174183 -1.119396 1 0
15 -1.161372 1.475106 1.486594 0.127516 0.213940 0.587080 -0.789652 0.130203 1.199389 1.458358 0.404206 0.754289 -0.784214 1 0
16 -0.476792 2.179287 0.101035 -1.393755 -0.740834 0.589666 0.873850 0.630539 0.535702 0.387326 -0.979677 0.259755 0.313358 1 0
17 -0.089088 -0.841832 0.674093 -0.842623 0.904577 -1.476862 1.853427 -1.108621 0.720923 0.383320 -1.842030 1.712321 -1.612726 0 0
18 -1.772732 0.488101 0.057829 0.041074 0.732429 1.052187 0.279830 -0.350521 -0.476338 -0.833438 0.184849 -0.055428 0.627307 1 0
19 -0.640351 0.068493 0.619966 -0.599171 0.860806 -0.385120 1.955087 -1.014740 1.224043 1.450896 -2.604448 2.187869 -0.464774 1 0
20 0.590240 0.699904 -0.097902 0.127319 -0.882999 0.319144 -0.146142 -0.540616 0.300593 0.688863 0.314647 0.709538 0.572811 1 0
21 0.500240 0.875222 -0.833826 0.377484 0.023480 1.321472 1.094037 0.734507 0.141947 0.214524 0.508556 -0.265911 -0.372316 1 0
22 -0.076653 0.518030 0.003390 0.452969 -0.218736 0.115409 0.332618 0.611098 0.211893 -0.206368 0.358363 0.614915 0.518172 1 0
23 0.010763 -0.352873 -0.460051 0.423968 -0.228393 -0.040296 -0.740869 -0.810034 -1.379366 -0.179024 0.147810 -0.224826 0.615011 1 0
24 0.874600 0.173728 -1.041125 0.845285 1.139221 0.264458 -0.378878 0.430226 -0.568469 -1.237333 0.032074 0.812111 0.431460 1 0
25 0.200637 0.337376 0.022126 1.189135 -0.210135 -1.195492 0.067874 1.349711 -0.534365 -0.132754 0.055132 0.239009 -0.275633 1 0
26 0.362627 0.159292 -1.211688 -0.555502 0.107540 0.797027 -0.246321 -1.113565 -1.373054 -2.369077 -0.539483 1.032005 1.637730 1 0
27 -0.504648 -0.561515 -2.173809 -1.525691 -0.810132 -0.617474 0.441103 1.146056 1.464488 -1.111032 -0.742722 0.034623 0.200147 0 0
28 -0.339646 -2.140319 -1.409226 -0.207553 -1.216547 -1.135346 -0.831817 1.136334 -0.187159 1.388841 0.282573 -0.807850 -0.371992 0 0
29 -1.279089 1.555887 0.890503 2.134195 0.337580 -0.037382 -2.046955 -2.888113 1.329665 1.436687 -1.576201 0.485256 1.429246 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
279 -1.132789 -0.931481 -0.350024 -0.228575 -1.201208 -1.044342 0.532403 1.667036 1.383485 -0.967474 -0.286625 -1.920618 -0.797190 0 1
280 -0.375948 0.058369 0.489068 0.862825 -1.876102 -0.195043 -1.163295 0.716190 0.384576 -0.168340 1.542126 -0.769460 0.456686 1 1
281 0.412883 -1.703432 -0.514845 -1.382818 -0.713972 -0.476089 1.471006 0.826485 0.508608 -1.311788 -2.010635 -1.122699 -0.848851 0 1
282 -0.152329 -2.012108 -0.217355 -1.122627 -0.851075 0.634424 1.711007 0.281350 -0.565156 -1.667195 -1.942452 -1.586592 -0.485128 0 1
283 0.348443 -2.381428 1.267515 -1.713290 0.161262 -1.589515 1.383857 -0.218429 0.412550 0.382171 -1.073499 -1.745128 -3.227845 0 1
284 -0.895866 1.001673 1.059356 0.166883 -0.710729 0.466737 -0.857566 -0.158962 0.004241 0.391823 0.576231 0.329506 -1.331272 1 1
285 0.417102 1.957515 2.350604 -1.125042 -2.206390 -0.674814 -1.217854 0.372865 0.840465 -0.472910 0.310419 1.379494 1.128412 1 1
286 -0.900897 -0.289100 0.433265 -0.281829 -0.379951 1.272236 0.313949 -0.261980 -0.053111 0.473694 0.493962 -0.263293 -0.657598 1 1
287 -0.002448 -0.853612 0.441903 0.406478 -0.823085 0.590185 -0.292046 -0.079952 -0.422138 0.579522 -0.620415 -0.298847 0.620798 1 1
288 -0.528092 -1.022206 -0.348679 0.093718 -1.642833 -2.355166 -0.992806 -0.143423 0.270521 0.838321 0.843686 0.469574 -0.325121 0 1
289 -0.387248 -1.305014 -0.365540 0.202745 -0.906016 -1.785190 -1.377992 -0.544742 -0.670979 -0.785606 0.505505 0.502505 -0.151297 0 1
290 0.425324 -2.583173 -2.181080 -1.262030 -0.179265 0.176164 1.763096 0.436737 -2.048534 -1.014266 1.298221 0.401742 -1.080608 0 1
291 -0.572282 -0.375532 -2.067885 -0.361247 -0.315065 -0.671820 -0.183865 -0.517694 -0.802956 -0.951809 0.282442 0.208005 -0.271252 0 1
292 -0.084382 -1.508230 -0.105496 -1.930204 -1.529664 -0.795467 1.273717 -1.858542 -0.446361 -0.239346 0.154464 -0.114937 -1.831603 0 1
293 -1.172703 0.783209 -1.141589 -0.982768 -0.513216 0.655437 1.962510 0.628858 1.130028 1.104741 1.539591 1.547843 -0.011302 1 1
294 -1.293038 0.838303 -1.049071 -0.708031 -0.779995 0.868108 1.621994 0.725495 1.173585 1.424395 1.751950 1.352876 0.339922 1 1
295 0.809878 -0.351504 -2.231752 -0.556719 -1.430264 -0.357918 -0.727837 1.110363 1.684188 0.429768 0.560061 0.371789 -1.110030 0 1
296 0.305449 -0.148924 -0.727054 -0.126830 0.467272 0.420013 1.212777 0.954055 -0.988419 -0.423614 -0.047239 0.058678 -0.031517 0 1
297 1.212224 1.916789 0.287969 -0.073842 0.289112 0.943764 -0.395404 -0.380613 0.262567 0.759137 0.277177 0.493951 1.026995 1 1
298 -0.121307 0.217217 0.030920 -0.201270 -0.752001 -0.276070 0.835502 -0.363704 -0.641199 0.283313 0.060013 0.013280 0.477857 1 1
299 -0.622824 -0.595352 0.256282 -0.111551 0.023990 1.221659 1.572998 -0.263983 -0.707828 0.707801 0.306249 1.046476 0.214979 1 1
300 -0.667480 -0.808638 0.730781 0.054549 0.191421 0.279885 0.088177 0.823617 0.604299 0.640274 -0.360151 1.298688 0.494875 1 1
301 0.928382 -2.375767 -0.427528 -0.852350 -1.137004 1.584181 -1.700220 -2.060965 -1.326622 0.451948 0.593212 0.152418 -0.128797 0 1
302 -0.483888 0.443846 0.129714 0.199624 -0.106985 0.817702 -0.072817 -1.163918 0.545762 -0.141320 0.041767 -0.402181 0.061897 1 1
303 0.715769 0.780533 1.467750 -0.595580 -1.178484 4.014345 -0.112339 -1.611382 -0.295511 0.032462 1.836607 -4.315898 -1.084441 1 1
304 0.041466 -0.470275 0.234655 0.109532 -0.518455 -0.977540 -0.613498 -1.108545 0.500653 -0.214143 -0.033265 -0.541673 0.714974 1 1
305 0.818747 0.495675 1.005686 0.967334 0.505171 -0.579478 -0.847677 1.574323 1.544556 0.412556 -0.972040 0.290457 0.289042 1 1
306 1.062928 -1.149587 1.951840 -0.065775 0.546680 0.994901 -1.817826 2.109742 0.264443 0.505287 -0.757462 0.578677 0.222503 1 1
307 -0.701621 -0.049803 -0.719153 -0.048069 1.223251 1.913492 0.887449 0.038186 0.546172 -0.568362 -1.091833 -0.250367 0.831399 1 1
308 -0.079821 0.796085 -0.215763 -1.396439 -0.133350 0.582037 2.442796 0.743250 -1.182753 -0.723658 -0.879934 -2.498899 -1.532262 0 1

309 rows × 15 columns

In [101]:
# Contingency of the 'chosen' label vs. cluster id, drawn as a stacked bar
# chart: one bar per cluster, segments = counts of chosen 0/1.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
# Column 0 holds the group sizes produced by .size().
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[101]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82b505400>
In [102]:
# Render the current company name as a markdown section header.
# NOTE(review): this import is repeated per section; it could live in the
# notebook's top import cell.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[3]))

Hotel Marrakech

ANN

In [103]:
X = df_n_ps_std_ch[3]
In [104]:
y = df_n_ps[3]['chosen']
In [105]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [106]:
X_train.shape
Out[106]:
(139, 13)
In [107]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [108]:
# Axes of the hyper-parameter grid searched below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,),
                          (10, 10), (20, 20), (30, 30), (20, 10),
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
# 0.001 .. 0.01 in steps of 0.001, plus an extra 0.02 point.
learning_rate_init_vec = [i / 1000 for i in range(1, 11)] + [0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [109]:
import time
start = time.time() # Current time in seconds since the Unix epoch (Jan 1, 1970) — used to time the search

# Seed NumPy so the stochastic parts of the search are repeatable.
np.random.seed(1234)
# Parameter grid: 3 activations x 11 max_iter x 11 architectures x 11 learning
# rates = 3993 candidates, each cross-validated 5-fold below.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; the best model is refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated and removed in scikit-learn 0.24 —
# drop this argument when upgrading the library.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [110]:
# Run the exhaustive search (3993 candidates x 5 folds); this is the
# long-running cell — wall-clock time is reported below.
grid.fit(X_train, y_train)

# Report best params, best CV accuracy, and the kappa of that same candidate.
# (The printed messages are user-facing Spanish strings; left unchanged.)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time right after model fitting finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.003, 'max_iter': 75}, que permiten obtener un Accuracy de 86.33% y un Kappa del 66.54
Tiempo total: 21.38 minutos
In [111]:
# Translate the best sklearn hyper-parameters into the Keras architecture:
# hidden-layer widths plus a single output unit, learning rate, and epochs.
n0 = X_train.shape[1]  # input dimension (number of features)

### hidden_layer_sizes
# list() copies the best tuple directly — no need for an index loop.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)  # final 1-unit output layer

lr = grid.best_params_['learning_rate_init']  # initial learning rate
epochs = grid.best_params_['max_iter']        # number of training epochs
In [112]:
input_tensor = Input(shape = (n0,))
In [113]:
# Build the hidden stack with the functional API: hidden_outputs[i] is the
# tensor feeding layer i; ns[:-1] are hidden widths, ns[-1] is the output unit.
# NOTE(review): the grid search selected 'relu' as the best activation, but
# these hidden layers hardcode 'tanh' — confirm this is intentional.
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = 'tanh')(hidden_outputs[i]))
    
# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [114]:
model = Model([input_tensor], [classification_output])
# Snapshot the freshly initialized weights; they are restored via
# set_weights() just before compiling/fitting, so training starts clean.
weights = model.get_weights()
In [115]:
model.summary()
Model: "model_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_4 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_10 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_11 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_12 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_13 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,261
Trainable params: 1,261
Non-trainable params: 0
_________________________________________________________________
In [116]:
# Reset to the saved initial weights so this fit starts from scratch.
model.set_weights(weights)
# NOTE(review): `lr=` is the legacy Keras argument name; newer versions use
# `learning_rate=` — keep as-is for the Keras version this notebook ran under.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever validation accuracy fails to improve by
# at least 0.01 for 10 consecutive epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 139 samples, validate on 47 samples
Epoch 1/75
139/139 [==============================] - 0s 2ms/step - loss: 0.7120 - accuracy: 0.4604 - val_loss: 0.6643 - val_accuracy: 0.6383
Epoch 2/75
139/139 [==============================] - 0s 101us/step - loss: 0.5930 - accuracy: 0.7410 - val_loss: 0.6515 - val_accuracy: 0.6809
Epoch 3/75
139/139 [==============================] - 0s 93us/step - loss: 0.5191 - accuracy: 0.7626 - val_loss: 0.6451 - val_accuracy: 0.7234
Epoch 4/75
139/139 [==============================] - 0s 86us/step - loss: 0.4626 - accuracy: 0.7842 - val_loss: 0.6396 - val_accuracy: 0.7872
Epoch 5/75
139/139 [==============================] - 0s 86us/step - loss: 0.4188 - accuracy: 0.8201 - val_loss: 0.6337 - val_accuracy: 0.7660
Epoch 6/75
139/139 [==============================] - 0s 93us/step - loss: 0.3836 - accuracy: 0.8489 - val_loss: 0.6403 - val_accuracy: 0.7660
Epoch 7/75
139/139 [==============================] - 0s 101us/step - loss: 0.3544 - accuracy: 0.8705 - val_loss: 0.6423 - val_accuracy: 0.7660
Epoch 8/75
139/139 [==============================] - 0s 101us/step - loss: 0.3328 - accuracy: 0.8705 - val_loss: 0.6390 - val_accuracy: 0.7660
Epoch 9/75
139/139 [==============================] - 0s 101us/step - loss: 0.3117 - accuracy: 0.8777 - val_loss: 0.6345 - val_accuracy: 0.7660
Epoch 10/75
139/139 [==============================] - 0s 93us/step - loss: 0.3017 - accuracy: 0.8705 - val_loss: 0.6233 - val_accuracy: 0.7660
Epoch 11/75
139/139 [==============================] - 0s 129us/step - loss: 0.2924 - accuracy: 0.8849 - val_loss: 0.6335 - val_accuracy: 0.7872
Epoch 12/75
139/139 [==============================] - 0s 93us/step - loss: 0.2825 - accuracy: 0.8777 - val_loss: 0.6348 - val_accuracy: 0.7872
Epoch 13/75
139/139 [==============================] - 0s 86us/step - loss: 0.2727 - accuracy: 0.8777 - val_loss: 0.6450 - val_accuracy: 0.7872
Epoch 14/75
139/139 [==============================] - 0s 93us/step - loss: 0.2655 - accuracy: 0.8993 - val_loss: 0.6622 - val_accuracy: 0.7872

Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 15/75
139/139 [==============================] - 0s 93us/step - loss: 0.2583 - accuracy: 0.9065 - val_loss: 0.6709 - val_accuracy: 0.7872
Epoch 16/75
139/139 [==============================] - 0s 86us/step - loss: 0.2544 - accuracy: 0.9065 - val_loss: 0.6659 - val_accuracy: 0.7872
Epoch 17/75
139/139 [==============================] - 0s 101us/step - loss: 0.2510 - accuracy: 0.9065 - val_loss: 0.6681 - val_accuracy: 0.7660
Epoch 18/75
139/139 [==============================] - 0s 93us/step - loss: 0.2473 - accuracy: 0.9137 - val_loss: 0.6822 - val_accuracy: 0.7660
Epoch 19/75
139/139 [==============================] - 0s 86us/step - loss: 0.2434 - accuracy: 0.9137 - val_loss: 0.6909 - val_accuracy: 0.7660
Epoch 20/75
139/139 [==============================] - 0s 79us/step - loss: 0.2402 - accuracy: 0.9137 - val_loss: 0.6924 - val_accuracy: 0.7660
Epoch 21/75
139/139 [==============================] - 0s 137us/step - loss: 0.2373 - accuracy: 0.9137 - val_loss: 0.6904 - val_accuracy: 0.7660
Epoch 22/75
139/139 [==============================] - 0s 101us/step - loss: 0.2347 - accuracy: 0.9137 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 23/75
139/139 [==============================] - 0s 93us/step - loss: 0.2313 - accuracy: 0.9137 - val_loss: 0.6854 - val_accuracy: 0.7660
Epoch 24/75
139/139 [==============================] - 0s 86us/step - loss: 0.2278 - accuracy: 0.9137 - val_loss: 0.6835 - val_accuracy: 0.7660

Epoch 00024: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 25/75
139/139 [==============================] - 0s 86us/step - loss: 0.2249 - accuracy: 0.9137 - val_loss: 0.6845 - val_accuracy: 0.7660
Epoch 26/75
139/139 [==============================] - 0s 101us/step - loss: 0.2242 - accuracy: 0.9137 - val_loss: 0.6798 - val_accuracy: 0.7660
Epoch 27/75
139/139 [==============================] - 0s 93us/step - loss: 0.2229 - accuracy: 0.9137 - val_loss: 0.6763 - val_accuracy: 0.7660
Epoch 28/75
139/139 [==============================] - 0s 101us/step - loss: 0.2213 - accuracy: 0.9137 - val_loss: 0.6758 - val_accuracy: 0.7660
Epoch 29/75
139/139 [==============================] - 0s 86us/step - loss: 0.2198 - accuracy: 0.9137 - val_loss: 0.6758 - val_accuracy: 0.7660
Epoch 30/75
139/139 [==============================] - 0s 86us/step - loss: 0.2179 - accuracy: 0.9209 - val_loss: 0.6753 - val_accuracy: 0.7660
Epoch 31/75
139/139 [==============================] - 0s 79us/step - loss: 0.2163 - accuracy: 0.9209 - val_loss: 0.6761 - val_accuracy: 0.7660
Epoch 32/75
139/139 [==============================] - 0s 86us/step - loss: 0.2150 - accuracy: 0.9209 - val_loss: 0.6781 - val_accuracy: 0.7660
Epoch 33/75
139/139 [==============================] - 0s 122us/step - loss: 0.2136 - accuracy: 0.9281 - val_loss: 0.6806 - val_accuracy: 0.7660
Epoch 34/75
139/139 [==============================] - 0s 158us/step - loss: 0.2121 - accuracy: 0.9281 - val_loss: 0.6834 - val_accuracy: 0.7660

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 35/75
139/139 [==============================] - 0s 129us/step - loss: 0.2106 - accuracy: 0.9281 - val_loss: 0.6837 - val_accuracy: 0.7660
Epoch 36/75
139/139 [==============================] - 0s 108us/step - loss: 0.2099 - accuracy: 0.9281 - val_loss: 0.6845 - val_accuracy: 0.7660
Epoch 37/75
139/139 [==============================] - 0s 108us/step - loss: 0.2092 - accuracy: 0.9281 - val_loss: 0.6846 - val_accuracy: 0.7660
Epoch 38/75
139/139 [==============================] - 0s 93us/step - loss: 0.2085 - accuracy: 0.9281 - val_loss: 0.6834 - val_accuracy: 0.7660
Epoch 39/75
139/139 [==============================] - 0s 93us/step - loss: 0.2078 - accuracy: 0.9281 - val_loss: 0.6844 - val_accuracy: 0.7660
Epoch 40/75
139/139 [==============================] - 0s 93us/step - loss: 0.2070 - accuracy: 0.9281 - val_loss: 0.6853 - val_accuracy: 0.7660
Epoch 41/75
139/139 [==============================] - 0s 93us/step - loss: 0.2064 - accuracy: 0.9281 - val_loss: 0.6865 - val_accuracy: 0.7660
Epoch 42/75
139/139 [==============================] - 0s 93us/step - loss: 0.2055 - accuracy: 0.9281 - val_loss: 0.6884 - val_accuracy: 0.7660
Epoch 43/75
139/139 [==============================] - 0s 93us/step - loss: 0.2050 - accuracy: 0.9281 - val_loss: 0.6905 - val_accuracy: 0.7660
Epoch 44/75
139/139 [==============================] - 0s 93us/step - loss: 0.2046 - accuracy: 0.9281 - val_loss: 0.6924 - val_accuracy: 0.7660

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 45/75
139/139 [==============================] - 0s 129us/step - loss: 0.2036 - accuracy: 0.9281 - val_loss: 0.6908 - val_accuracy: 0.7660
Epoch 46/75
139/139 [==============================] - 0s 108us/step - loss: 0.2031 - accuracy: 0.9281 - val_loss: 0.6889 - val_accuracy: 0.7660
Epoch 47/75
139/139 [==============================] - 0s 86us/step - loss: 0.2027 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 48/75
139/139 [==============================] - 0s 72us/step - loss: 0.2024 - accuracy: 0.9281 - val_loss: 0.6861 - val_accuracy: 0.7660
Epoch 49/75
139/139 [==============================] - 0s 93us/step - loss: 0.2021 - accuracy: 0.9281 - val_loss: 0.6861 - val_accuracy: 0.7660
Epoch 50/75
139/139 [==============================] - 0s 79us/step - loss: 0.2016 - accuracy: 0.9281 - val_loss: 0.6864 - val_accuracy: 0.7660
Epoch 51/75
139/139 [==============================] - 0s 86us/step - loss: 0.2013 - accuracy: 0.9281 - val_loss: 0.6864 - val_accuracy: 0.7660
Epoch 52/75
139/139 [==============================] - 0s 79us/step - loss: 0.2010 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 53/75
139/139 [==============================] - 0s 79us/step - loss: 0.2006 - accuracy: 0.9281 - val_loss: 0.6876 - val_accuracy: 0.7660
Epoch 54/75
139/139 [==============================] - 0s 72us/step - loss: 0.2003 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660

Epoch 00054: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 55/75
139/139 [==============================] - 0s 72us/step - loss: 0.1998 - accuracy: 0.9281 - val_loss: 0.6875 - val_accuracy: 0.7660
Epoch 56/75
139/139 [==============================] - 0s 122us/step - loss: 0.1997 - accuracy: 0.9281 - val_loss: 0.6872 - val_accuracy: 0.7660
Epoch 57/75
139/139 [==============================] - 0s 86us/step - loss: 0.1995 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 58/75
139/139 [==============================] - 0s 79us/step - loss: 0.1993 - accuracy: 0.9281 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 59/75
139/139 [==============================] - 0s 79us/step - loss: 0.1991 - accuracy: 0.9281 - val_loss: 0.6880 - val_accuracy: 0.7660
Epoch 60/75
139/139 [==============================] - 0s 72us/step - loss: 0.1990 - accuracy: 0.9281 - val_loss: 0.6874 - val_accuracy: 0.7660
Epoch 61/75
139/139 [==============================] - 0s 79us/step - loss: 0.1987 - accuracy: 0.9281 - val_loss: 0.6869 - val_accuracy: 0.7660
Epoch 62/75
139/139 [==============================] - 0s 79us/step - loss: 0.1986 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 63/75
139/139 [==============================] - 0s 72us/step - loss: 0.1983 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 64/75
139/139 [==============================] - 0s 79us/step - loss: 0.1981 - accuracy: 0.9281 - val_loss: 0.6868 - val_accuracy: 0.7660

Epoch 00064: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 65/75
139/139 [==============================] - 0s 79us/step - loss: 0.1980 - accuracy: 0.9281 - val_loss: 0.6868 - val_accuracy: 0.7660
Epoch 66/75
139/139 [==============================] - 0s 79us/step - loss: 0.1979 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 67/75
139/139 [==============================] - 0s 79us/step - loss: 0.1978 - accuracy: 0.9281 - val_loss: 0.6866 - val_accuracy: 0.7660
Epoch 68/75
139/139 [==============================] - 0s 101us/step - loss: 0.1977 - accuracy: 0.9281 - val_loss: 0.6866 - val_accuracy: 0.7660
Epoch 69/75
139/139 [==============================] - 0s 122us/step - loss: 0.1977 - accuracy: 0.9281 - val_loss: 0.6867 - val_accuracy: 0.7660
Epoch 70/75
139/139 [==============================] - 0s 86us/step - loss: 0.1975 - accuracy: 0.9281 - val_loss: 0.6870 - val_accuracy: 0.7660
Epoch 71/75
139/139 [==============================] - 0s 86us/step - loss: 0.1974 - accuracy: 0.9281 - val_loss: 0.6871 - val_accuracy: 0.7660
Epoch 72/75
139/139 [==============================] - 0s 72us/step - loss: 0.1973 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660
Epoch 73/75
139/139 [==============================] - 0s 79us/step - loss: 0.1972 - accuracy: 0.9281 - val_loss: 0.6872 - val_accuracy: 0.7660
Epoch 74/75
139/139 [==============================] - 0s 79us/step - loss: 0.1972 - accuracy: 0.9281 - val_loss: 0.6873 - val_accuracy: 0.7660

Epoch 00074: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 75/75
139/139 [==============================] - 0s 93us/step - loss: 0.1971 - accuracy: 0.9281 - val_loss: 0.6875 - val_accuracy: 0.7660
In [117]:
# Plot training/validation accuracy and loss from the Keras History object.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Use a dedicated name for the x-axis so we do not shadow the integer
# `epochs` hyper-parameter defined earlier. (A leftover debug print of this
# range was also removed.)
epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 75)
In [118]:
# Held-out performance of the trained Keras model.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
47/47 [==============================] - 0s 106us/step
test loss: 0.687487561017909, test accuracy: 0.7659574747085571
In [119]:
# ROC AUC uses the raw sigmoid scores (no thresholding needed).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7903225806451613
In [120]:
# Binarize the sigmoid scores at 0.5 for Cohen's kappa. The vectorized
# comparison replaces a per-element lambda that relied on implicit truthiness
# of size-1 arrays (deprecated behavior when y_pred is an (n, 1) array).
y_pred = (np.asarray(y_pred) >= 0.5).astype(int).ravel().tolist()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
Kappa:  0.396732788798133

KMeans

In [121]:
X
Out[121]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.311006 1.696486 0.912001 -0.211934 -0.513557 1.357699 0.777385 0.508585 -2.290902 -2.422249 -0.738438 -2.221933 -1.191363
1 0.947147 -0.576741 -1.258913 -0.786859 0.887961 -1.895175 -0.310427 -0.374360 1.478062 0.972075 -1.105984 0.612318 -1.486887
2 -0.115048 1.257050 0.043002 -2.677464 3.902183 -1.091787 0.505797 2.341684 -2.345224 -1.678088 -2.608854 -2.617777 -2.135652
3 0.621032 1.417449 1.399722 -0.625673 1.012110 0.230671 -0.287988 1.012771 -2.250326 -0.340971 -0.353905 -0.717440 -0.390485
4 0.340978 1.662814 -1.775422 0.156552 1.678811 0.301711 2.038462 1.511985 1.508787 -2.046602 0.741073 -0.282747 -0.779814
5 0.426765 -1.056701 -1.244088 -0.696846 -0.372415 -0.847420 -0.209607 1.462924 -0.541420 0.000628 -1.135148 1.608546 1.709532
6 0.210857 -1.779497 -2.206121 -0.832640 0.636169 -1.979858 -0.510102 1.437770 0.128209 0.025521 0.184211 2.300204 0.912793
7 -0.821293 -0.049796 0.237440 0.379918 0.714133 0.670070 0.122605 -0.069298 -0.126759 -0.199559 0.547891 -0.099623 -0.024895
8 0.420103 -0.662020 -0.550543 -0.566406 -0.923203 -0.295152 -0.533234 0.927026 0.119135 0.218761 -0.245778 0.627242 1.313952
9 -1.436247 0.435343 2.482690 1.099668 -0.392845 0.565039 0.569531 -0.088218 -0.131137 -0.699769 -0.538549 -0.329443 0.942919
10 -1.574051 -1.334372 -1.636184 1.768991 -0.369456 -0.008046 -1.402331 0.012625 1.135935 1.623145 -0.653935 0.182348 1.052310
11 -1.798986 -1.632467 -1.314854 2.656006 -0.096678 -0.174852 -1.748372 0.185804 0.930317 0.365776 -0.676448 0.358271 1.523770
12 -0.708207 0.931180 0.258840 -0.189291 -0.204832 -0.103872 0.221697 -0.231695 -0.003439 0.423528 1.259835 0.119625 -0.192417
13 -2.007033 -0.288096 0.099713 0.390909 1.333138 -0.069950 0.643074 0.172080 -0.109666 0.304475 -1.157528 -1.708326 -1.420079
14 -0.497985 0.020592 -0.123619 0.165046 -0.765078 -0.465219 0.172533 0.722853 0.284863 -0.035284 0.024769 -0.065990 -0.992437
15 1.200625 0.984580 -0.234312 0.348855 0.175663 0.309396 0.390611 -0.745912 -0.667554 -0.052439 0.119610 -0.862930 0.945979
16 0.435253 3.280178 0.407736 1.143148 2.291571 0.546530 0.170667 0.427708 -0.063936 -0.532360 0.404150 0.415849 0.869331
17 -0.398944 0.035026 -1.634042 -1.354378 0.854385 1.406182 -0.773335 0.663902 0.928496 1.278830 0.464511 0.235475 -0.040374
18 -0.454008 -0.234096 -0.930672 -0.507506 0.545773 0.437756 1.026910 0.013959 -0.620099 -0.593763 1.073690 0.594340 0.987056
19 0.149846 0.062252 -0.002122 0.786346 0.810930 0.304880 -0.882886 -0.043156 2.503584 0.894947 0.394981 0.761651 0.402963
20 -0.314274 0.446482 0.889744 0.891114 1.249237 0.718469 0.296834 -0.831548 -0.393364 -0.103574 0.295790 0.092061 0.424633
21 0.659365 1.053258 -0.877939 -0.295954 -1.122110 -0.035202 1.512616 0.031457 -0.700740 -1.687204 -1.136215 -1.545451 -0.082548
22 0.568507 -0.357318 -1.183577 -0.069205 0.462644 -0.956011 0.501504 0.240708 -0.025482 0.416003 0.237690 -0.566935 -0.846151
23 0.696474 0.477607 -1.637469 -1.158983 -2.224208 -1.861929 -0.176558 0.694585 0.426826 -0.088376 -0.335290 1.125320 0.705700
24 -0.221795 -0.513464 -0.506448 0.594506 0.033232 -1.141879 -1.582503 -0.081204 -0.001962 -0.704687 -0.473528 0.580117 1.533686
25 0.036099 -0.007586 0.116729 0.438081 -1.526141 -1.994283 -1.014100 0.028630 -0.553238 -0.540795 0.467730 0.943285 0.498193
26 -0.291576 -0.372192 -1.176599 0.078535 0.516288 -1.851892 -2.218803 0.335200 0.323222 0.006649 0.017717 0.133172 1.208725
27 0.953536 0.427304 -0.554063 0.425439 1.368674 0.362392 0.477030 -0.976616 -0.382390 0.310619 -0.903078 -0.943886 -0.047616
28 -1.172014 1.307258 -1.059323 -0.655908 1.591107 0.483432 0.474862 0.348014 -0.527448 0.798802 -0.075253 1.943808 0.108268
29 -0.954427 0.000731 -0.367958 0.281024 0.303337 0.744504 1.271647 0.298340 -0.057042 -0.297712 -0.053703 -0.045043 -0.561554
... ... ... ... ... ... ... ... ... ... ... ... ... ...
156 0.232363 -1.167339 -0.114632 1.240724 -0.209611 0.597503 -0.105216 -0.393018 -0.168804 -0.038601 0.602075 -0.482733 0.192333
157 -1.686193 -0.806140 -0.531342 -0.411912 0.312945 0.751058 0.624837 -0.394463 0.549120 -1.174079 -1.374572 -1.950144 -0.652535
158 0.487798 1.116042 -0.308817 0.175231 -0.191701 -0.682970 0.502123 0.749073 1.365476 0.198244 1.283992 0.132188 0.482532
159 1.049575 0.742765 0.000505 0.670386 0.235663 -0.297404 0.891743 0.047729 0.086633 0.873400 0.552393 0.496793 0.659122
160 0.285967 0.602916 -0.009050 0.802464 0.333031 -1.182611 0.473870 0.896236 0.890391 0.208214 0.786475 0.044481 -0.114927
161 2.568510 -0.180837 0.794882 1.410838 0.898076 0.468184 0.963255 0.338074 2.081580 2.353196 0.146660 -0.295606 -0.020484
162 0.821849 0.906757 0.282262 0.304716 -0.691824 0.772704 2.543328 -0.404440 1.861464 1.635426 0.204673 0.084333 0.469447
163 2.581037 0.239015 1.212048 0.498566 0.095720 0.062469 3.463238 0.374969 -0.054235 -0.365031 -0.169020 1.160964 0.666076
164 -0.247271 -0.874145 -0.840584 0.233138 0.034101 0.259892 0.144353 -0.570094 1.244117 0.282845 0.127444 -0.721587 -1.450860
165 0.188979 -0.519200 0.108496 -0.513645 -0.637646 0.812515 0.626360 -0.156977 -0.092241 -0.517923 0.026563 -0.597616 -0.101096
166 -0.015438 -0.656621 -0.739614 0.302131 0.583862 0.465267 0.342075 -0.318902 0.221544 0.654368 0.777463 -0.462212 -0.867288
167 -1.567081 -1.052883 -0.417918 0.636963 -0.531279 0.787238 -1.913461 -0.020653 -0.111129 0.112259 -0.380422 0.497894 0.709826
168 -1.883530 -0.172892 -0.340073 -0.255266 -0.480237 -0.061425 -0.158589 -0.308725 -0.034923 0.150845 0.696367 0.704196 0.473391
169 -1.577057 -0.602693 0.448785 1.073850 -0.714538 1.427240 -1.645225 0.812069 -0.019466 -0.719024 -0.991241 0.521497 0.461555
170 0.667824 -0.298287 -0.412356 -1.154598 0.171532 -0.341146 -0.411827 -1.296671 0.428160 -0.233124 0.035111 -0.934501 0.123701
171 0.626343 -0.046406 -0.168999 -1.278941 0.502027 -0.060296 0.062571 -1.284727 0.028380 -0.488598 -0.255008 -1.199407 0.088115
172 -0.441233 -0.987406 0.015664 -2.982951 -0.379800 1.485748 -2.174788 -3.521120 -0.191502 2.486906 2.771782 0.789523 0.553119
173 0.331514 0.334707 -0.187508 0.489055 1.133140 1.016598 -0.572753 -0.634721 -0.567790 -0.492410 0.634219 -0.122575 -0.205540
174 0.480131 0.345645 0.202709 -0.423456 1.184414 2.116965 -0.463050 0.212550 1.973473 -0.996794 0.924229 -0.170049 -0.068464
175 1.039410 -0.773764 0.113739 -0.796036 -1.053802 -1.238009 0.153897 0.497600 1.347261 0.972165 0.993095 0.066125 -0.069772
176 0.194148 -0.229033 -0.571129 -0.704359 -0.204400 -0.273049 1.105329 0.052851 -0.360196 0.099095 1.063628 -0.266594 0.041526
177 -0.169188 0.025195 -0.189648 0.376353 0.802036 -1.185140 0.488985 0.244963 1.305631 0.241661 0.402415 -0.494815 0.252206
178 1.433007 0.217051 -0.388425 -1.158798 2.068592 1.278810 -1.193547 -0.909321 -0.207122 2.062093 1.374797 0.383804 1.569650
179 1.371536 0.624596 -0.082552 0.444824 1.959112 -0.736647 -1.448177 0.624897 1.304939 -0.025270 -0.519401 0.592135 1.312240
180 0.815061 -1.210119 0.844643 -1.152602 -0.216878 -1.573232 -0.065062 2.136014 -0.285964 1.827988 -0.982121 1.139199 0.936226
181 0.814962 -1.028970 -1.340094 -1.579784 0.774822 -0.351654 -2.148181 2.772395 1.638263 -0.394371 1.796246 1.182459 0.824064
182 1.615277 0.706391 -0.611277 0.513438 0.987249 1.226124 0.240966 0.485917 1.355615 -0.480955 -0.255325 -0.370864 0.107591
183 0.290224 0.578762 0.024629 0.119894 0.626180 1.025427 0.180541 -0.504388 -1.085411 -1.413825 0.811722 0.640653 0.433677
184 0.086408 -1.394139 -0.501233 1.251905 -0.481983 0.026482 -1.317983 -0.580623 -0.160381 -0.718194 0.110108 -0.183905 0.074891
185 0.024909 -0.713904 -1.235134 -0.194562 0.155358 -0.586587 -0.455970 0.577457 1.172268 0.468799 0.500130 1.133624 0.192845

186 rows × 13 columns

In [122]:
# Within-cluster sum of squares (inertia) for k = 1..14 — elbow method.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[122]:
[2418.0,
 2172.1027618675616,
 1998.025251732407,
 1881.5714074571665,
 1790.4988909951144,
 1710.5509624175916,
 1638.1160961205187,
 1594.2979605381602,
 1541.5219098889218,
 1492.348355544395,
 1435.6626781919845,
 1397.526425851007,
 1339.3046499586067,
 1333.0833102955885]
In [123]:
# Elbow curve: inertia vs. number of clusters. Labels added so the figure
# stands alone; trailing ';' suppresses the Line2D repr noise.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.xlabel('Number of clusters (k)')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.title('Elbow curve for KMeans');
Out[123]:
[<matplotlib.lines.Line2D at 0x1e82cc61860>]

K=3

In [124]:
# Final clustering with the elbow-chosen k=3; random_state fixes init.
kmeans_ch = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[124]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [125]:
kmeans_ch.labels_
Out[125]:
array([1, 2, 1, 1, 0, 2, 2, 0, 2, 0, 2, 2, 0, 1, 1, 0, 0, 2, 0, 0, 0, 1,
       1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 1, 1, 1,
       1, 2, 0, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 2, 1, 2,
       2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 1, 0, 2, 1, 0, 2, 1, 0, 1, 2, 2,
       0, 1, 1, 0, 1, 0, 2, 2, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 2, 1, 1, 1,
       2, 2, 2, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 1, 0, 2, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 0,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 2, 0, 0, 2,
       1, 0, 0, 0, 2, 2, 0, 0, 2, 2])
In [126]:
# NOTE(review): predict() on the same data KMeans was fit on returns the
# same assignments already in kmeans_ch.labels_ (compare Out[125]/Out[126]).
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[126]:
array([1, 2, 1, 1, 0, 2, 2, 0, 2, 0, 2, 2, 0, 1, 1, 0, 0, 2, 0, 0, 0, 1,
       1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 1, 1, 1,
       1, 2, 0, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 2, 1, 2,
       2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 1, 0, 2, 1, 0, 2, 1, 0, 1, 2, 2,
       0, 1, 1, 0, 1, 0, 2, 2, 2, 1, 2, 2, 0, 1, 2, 1, 2, 2, 2, 1, 1, 1,
       2, 2, 2, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 1, 0, 2, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 1, 2, 2, 1, 0, 2, 2, 1, 0, 1, 0,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 2, 0, 0, 2,
       1, 0, 0, 0, 2, 2, 0, 0, 2, 2])
In [127]:
# Attach cluster id and target without mutating the shared source frame:
# X aliases df_n_ps_std_ch[3], so the original .loc column assignments added
# columns to that stored frame and leaked state into later re-runs.
# assign() returns a new frame and rebinds X instead.
X = X.assign(Cluster=clusters_ch, chosen=list(y))
In [128]:
X
Out[128]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.311006 1.696486 0.912001 -0.211934 -0.513557 1.357699 0.777385 0.508585 -2.290902 -2.422249 -0.738438 -2.221933 -1.191363 1 0
1 0.947147 -0.576741 -1.258913 -0.786859 0.887961 -1.895175 -0.310427 -0.374360 1.478062 0.972075 -1.105984 0.612318 -1.486887 2 0
2 -0.115048 1.257050 0.043002 -2.677464 3.902183 -1.091787 0.505797 2.341684 -2.345224 -1.678088 -2.608854 -2.617777 -2.135652 1 0
3 0.621032 1.417449 1.399722 -0.625673 1.012110 0.230671 -0.287988 1.012771 -2.250326 -0.340971 -0.353905 -0.717440 -0.390485 1 0
4 0.340978 1.662814 -1.775422 0.156552 1.678811 0.301711 2.038462 1.511985 1.508787 -2.046602 0.741073 -0.282747 -0.779814 0 0
5 0.426765 -1.056701 -1.244088 -0.696846 -0.372415 -0.847420 -0.209607 1.462924 -0.541420 0.000628 -1.135148 1.608546 1.709532 2 0
6 0.210857 -1.779497 -2.206121 -0.832640 0.636169 -1.979858 -0.510102 1.437770 0.128209 0.025521 0.184211 2.300204 0.912793 2 0
7 -0.821293 -0.049796 0.237440 0.379918 0.714133 0.670070 0.122605 -0.069298 -0.126759 -0.199559 0.547891 -0.099623 -0.024895 0 0
8 0.420103 -0.662020 -0.550543 -0.566406 -0.923203 -0.295152 -0.533234 0.927026 0.119135 0.218761 -0.245778 0.627242 1.313952 2 0
9 -1.436247 0.435343 2.482690 1.099668 -0.392845 0.565039 0.569531 -0.088218 -0.131137 -0.699769 -0.538549 -0.329443 0.942919 0 0
10 -1.574051 -1.334372 -1.636184 1.768991 -0.369456 -0.008046 -1.402331 0.012625 1.135935 1.623145 -0.653935 0.182348 1.052310 2 0
11 -1.798986 -1.632467 -1.314854 2.656006 -0.096678 -0.174852 -1.748372 0.185804 0.930317 0.365776 -0.676448 0.358271 1.523770 2 0
12 -0.708207 0.931180 0.258840 -0.189291 -0.204832 -0.103872 0.221697 -0.231695 -0.003439 0.423528 1.259835 0.119625 -0.192417 0 0
13 -2.007033 -0.288096 0.099713 0.390909 1.333138 -0.069950 0.643074 0.172080 -0.109666 0.304475 -1.157528 -1.708326 -1.420079 1 0
14 -0.497985 0.020592 -0.123619 0.165046 -0.765078 -0.465219 0.172533 0.722853 0.284863 -0.035284 0.024769 -0.065990 -0.992437 1 0
15 1.200625 0.984580 -0.234312 0.348855 0.175663 0.309396 0.390611 -0.745912 -0.667554 -0.052439 0.119610 -0.862930 0.945979 0 0
16 0.435253 3.280178 0.407736 1.143148 2.291571 0.546530 0.170667 0.427708 -0.063936 -0.532360 0.404150 0.415849 0.869331 0 0
17 -0.398944 0.035026 -1.634042 -1.354378 0.854385 1.406182 -0.773335 0.663902 0.928496 1.278830 0.464511 0.235475 -0.040374 2 0
18 -0.454008 -0.234096 -0.930672 -0.507506 0.545773 0.437756 1.026910 0.013959 -0.620099 -0.593763 1.073690 0.594340 0.987056 0 0
19 0.149846 0.062252 -0.002122 0.786346 0.810930 0.304880 -0.882886 -0.043156 2.503584 0.894947 0.394981 0.761651 0.402963 0 0
20 -0.314274 0.446482 0.889744 0.891114 1.249237 0.718469 0.296834 -0.831548 -0.393364 -0.103574 0.295790 0.092061 0.424633 0 0
21 0.659365 1.053258 -0.877939 -0.295954 -1.122110 -0.035202 1.512616 0.031457 -0.700740 -1.687204 -1.136215 -1.545451 -0.082548 1 0
22 0.568507 -0.357318 -1.183577 -0.069205 0.462644 -0.956011 0.501504 0.240708 -0.025482 0.416003 0.237690 -0.566935 -0.846151 1 0
23 0.696474 0.477607 -1.637469 -1.158983 -2.224208 -1.861929 -0.176558 0.694585 0.426826 -0.088376 -0.335290 1.125320 0.705700 2 0
24 -0.221795 -0.513464 -0.506448 0.594506 0.033232 -1.141879 -1.582503 -0.081204 -0.001962 -0.704687 -0.473528 0.580117 1.533686 2 0
25 0.036099 -0.007586 0.116729 0.438081 -1.526141 -1.994283 -1.014100 0.028630 -0.553238 -0.540795 0.467730 0.943285 0.498193 2 0
26 -0.291576 -0.372192 -1.176599 0.078535 0.516288 -1.851892 -2.218803 0.335200 0.323222 0.006649 0.017717 0.133172 1.208725 2 0
27 0.953536 0.427304 -0.554063 0.425439 1.368674 0.362392 0.477030 -0.976616 -0.382390 0.310619 -0.903078 -0.943886 -0.047616 0 0
28 -1.172014 1.307258 -1.059323 -0.655908 1.591107 0.483432 0.474862 0.348014 -0.527448 0.798802 -0.075253 1.943808 0.108268 0 0
29 -0.954427 0.000731 -0.367958 0.281024 0.303337 0.744504 1.271647 0.298340 -0.057042 -0.297712 -0.053703 -0.045043 -0.561554 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
156 0.232363 -1.167339 -0.114632 1.240724 -0.209611 0.597503 -0.105216 -0.393018 -0.168804 -0.038601 0.602075 -0.482733 0.192333 0 1
157 -1.686193 -0.806140 -0.531342 -0.411912 0.312945 0.751058 0.624837 -0.394463 0.549120 -1.174079 -1.374572 -1.950144 -0.652535 1 1
158 0.487798 1.116042 -0.308817 0.175231 -0.191701 -0.682970 0.502123 0.749073 1.365476 0.198244 1.283992 0.132188 0.482532 0 1
159 1.049575 0.742765 0.000505 0.670386 0.235663 -0.297404 0.891743 0.047729 0.086633 0.873400 0.552393 0.496793 0.659122 0 1
160 0.285967 0.602916 -0.009050 0.802464 0.333031 -1.182611 0.473870 0.896236 0.890391 0.208214 0.786475 0.044481 -0.114927 0 1
161 2.568510 -0.180837 0.794882 1.410838 0.898076 0.468184 0.963255 0.338074 2.081580 2.353196 0.146660 -0.295606 -0.020484 0 1
162 0.821849 0.906757 0.282262 0.304716 -0.691824 0.772704 2.543328 -0.404440 1.861464 1.635426 0.204673 0.084333 0.469447 0 1
163 2.581037 0.239015 1.212048 0.498566 0.095720 0.062469 3.463238 0.374969 -0.054235 -0.365031 -0.169020 1.160964 0.666076 0 1
164 -0.247271 -0.874145 -0.840584 0.233138 0.034101 0.259892 0.144353 -0.570094 1.244117 0.282845 0.127444 -0.721587 -1.450860 1 1
165 0.188979 -0.519200 0.108496 -0.513645 -0.637646 0.812515 0.626360 -0.156977 -0.092241 -0.517923 0.026563 -0.597616 -0.101096 1 1
166 -0.015438 -0.656621 -0.739614 0.302131 0.583862 0.465267 0.342075 -0.318902 0.221544 0.654368 0.777463 -0.462212 -0.867288 1 1
167 -1.567081 -1.052883 -0.417918 0.636963 -0.531279 0.787238 -1.913461 -0.020653 -0.111129 0.112259 -0.380422 0.497894 0.709826 2 1
168 -1.883530 -0.172892 -0.340073 -0.255266 -0.480237 -0.061425 -0.158589 -0.308725 -0.034923 0.150845 0.696367 0.704196 0.473391 2 1
169 -1.577057 -0.602693 0.448785 1.073850 -0.714538 1.427240 -1.645225 0.812069 -0.019466 -0.719024 -0.991241 0.521497 0.461555 2 1
170 0.667824 -0.298287 -0.412356 -1.154598 0.171532 -0.341146 -0.411827 -1.296671 0.428160 -0.233124 0.035111 -0.934501 0.123701 1 1
171 0.626343 -0.046406 -0.168999 -1.278941 0.502027 -0.060296 0.062571 -1.284727 0.028380 -0.488598 -0.255008 -1.199407 0.088115 1 1
172 -0.441233 -0.987406 0.015664 -2.982951 -0.379800 1.485748 -2.174788 -3.521120 -0.191502 2.486906 2.771782 0.789523 0.553119 2 1
173 0.331514 0.334707 -0.187508 0.489055 1.133140 1.016598 -0.572753 -0.634721 -0.567790 -0.492410 0.634219 -0.122575 -0.205540 0 1
174 0.480131 0.345645 0.202709 -0.423456 1.184414 2.116965 -0.463050 0.212550 1.973473 -0.996794 0.924229 -0.170049 -0.068464 0 1
175 1.039410 -0.773764 0.113739 -0.796036 -1.053802 -1.238009 0.153897 0.497600 1.347261 0.972165 0.993095 0.066125 -0.069772 2 1
176 0.194148 -0.229033 -0.571129 -0.704359 -0.204400 -0.273049 1.105329 0.052851 -0.360196 0.099095 1.063628 -0.266594 0.041526 1 1
177 -0.169188 0.025195 -0.189648 0.376353 0.802036 -1.185140 0.488985 0.244963 1.305631 0.241661 0.402415 -0.494815 0.252206 0 1
178 1.433007 0.217051 -0.388425 -1.158798 2.068592 1.278810 -1.193547 -0.909321 -0.207122 2.062093 1.374797 0.383804 1.569650 0 1
179 1.371536 0.624596 -0.082552 0.444824 1.959112 -0.736647 -1.448177 0.624897 1.304939 -0.025270 -0.519401 0.592135 1.312240 0 1
180 0.815061 -1.210119 0.844643 -1.152602 -0.216878 -1.573232 -0.065062 2.136014 -0.285964 1.827988 -0.982121 1.139199 0.936226 2 1
181 0.814962 -1.028970 -1.340094 -1.579784 0.774822 -0.351654 -2.148181 2.772395 1.638263 -0.394371 1.796246 1.182459 0.824064 2 1
182 1.615277 0.706391 -0.611277 0.513438 0.987249 1.226124 0.240966 0.485917 1.355615 -0.480955 -0.255325 -0.370864 0.107591 0 1
183 0.290224 0.578762 0.024629 0.119894 0.626180 1.025427 0.180541 -0.504388 -1.085411 -1.413825 0.811722 0.640653 0.433677 0 1
184 0.086408 -1.394139 -0.501233 1.251905 -0.481983 0.026482 -1.317983 -0.580623 -0.160381 -0.718194 0.110108 -0.183905 0.074891 2 1
185 0.024909 -0.713904 -1.235134 -0.194562 0.155358 -0.586587 -0.455970 0.577457 1.172268 0.468799 0.500130 1.133624 0.192845 2 1

186 rows × 15 columns

In [129]:
# Per-cluster composition: how many chosen (1) vs. not-chosen (0) tracks fell
# into each KMeans cluster, as a stacked bar chart.
pair_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
cluster_table = pair_counts.pivot(index='Cluster', columns='chosen', values=0)
cluster_table.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[129]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82cc917b8>
In [130]:
from IPython.display import display, Markdown, Latex

# Render the current company name as a level-2 markdown heading.
display(Markdown(f"## {companies[4]}"))

Specialized

ANN

In [131]:
# Standardized feature matrix for company index 4 (presumably Specialized — see heading above; confirm against `companies`).
X = df_n_ps_std_ch[4]
In [132]:
# Binary target: whether each song was chosen for the playlist.
y = df_n_ps[4]['chosen']
In [133]:
# Hold out a test set. random_state pins the split: the np.random.seed(1234)
# call further down runs AFTER this cell, so without it the partition (and
# every downstream result) changes on each re-run.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [134]:
# Sanity check: (n_training_samples, n_features).
X_train.shape
Out[134]:
(164, 13)
In [135]:
# Base estimator for the grid search (hidden_layer_sizes is overridden by the
# grid below). random_state makes each fitted clone reproducible — relying on
# the global numpy seed does not survive the n_jobs=-1 parallel workers used
# by GridSearchCV.
mlp = MLPClassifier(hidden_layer_sizes=(30, 30, 30), random_state=1234)
In [136]:
# Candidate values for the MLP hyperparameter search.
activation_vec = ["logistic", "relu", "tanh"]
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Topologies: one, two and three hidden layers of uniform width,
# plus two tapered variants.
hidden_layer_sizes_vec = (
    [(w,) for w in (10, 20, 30)]
    + [(w, w) for w in (10, 20, 30)]
    + [(20, 10)]
    + [(w, w, w) for w in (10, 20, 30)]
    + [(30, 20, 10)]
)
# 0.001 .. 0.010 in steps of 0.001, plus 0.02.
learning_rate_init_vec = [k / 1000 for k in range(1, 11)] + [0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [137]:
import time
start = time.time()  # wall-clock reference; total tuning time is reported after fit

np.random.seed(1234)

# Search space (batch_size deliberately excluded to keep the search tractable).
parametros = {
    'activation': activation_vec,
    'max_iter': max_iter_vec,
    'hidden_layer_sizes': hidden_layer_sizes_vec,
    'learning_rate_init': learning_rate_init_vec#,
    #'batch_size': batch_size_vec
    }
# Track Cohen's kappa alongside accuracy; the refit uses accuracy.
scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
# NOTE(review): `iid=True` was removed in scikit-learn >= 0.24 — this cell
# requires an older sklearn; drop the argument when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring,
                    refit='accuracy', n_jobs=-1, iid=True)
In [138]:
grid.fit(X_train, y_train)

# Mean CV kappa of the refit (best-accuracy) configuration, in percent.
best_kappa_pct = grid.cv_results_['mean_test_kappa'][grid.best_index_] * 100
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_ * 100, best_kappa_pct))

end = time.time()  # timestamp after the search finishes
print("Tiempo total: {0:.2f} minutos".format((end - start) / 60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20, 20, 20), 'learning_rate_init': 0.003, 'max_iter': 400}, que permiten obtener un Accuracy de 72.56% y un Kappa del 45.12
Tiempo total: 20.25 minutos
In [139]:
# Input width: one unit per feature.
n0 = X_train.shape[1]

### hidden_layer_sizes
# Layer widths for the Keras model: the tuned hidden sizes followed by the
# single output unit. list() replaces the element-by-element copy loop.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

# Tuned optimizer settings reused for Keras training below.
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [140]:
# Keras functional-API entry point: n0 features per sample.
input_tensor = Input(shape = (n0,))
In [141]:
# Use the activation selected by the grid search instead of a hard-coded
# 'tanh' (the search above chose 'relu'); map the sklearn name to its Keras
# equivalent — only 'logistic' differs.
hidden_act = {'logistic': 'sigmoid', 'relu': 'relu', 'tanh': 'tanh'}[grid.best_params_['activation']]

# Chain the hidden Dense layers; hidden_outputs[i] feeds layer i.
hidden_outputs = [input_tensor]
for i in range(len(ns) - 1):
    hidden_outputs.append(Dense(ns[i], activation=hidden_act)(hidden_outputs[i]))

# Single sigmoid unit for the binary "chosen" target.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [142]:
# Assemble the functional model from input to classification output.
model = Model([input_tensor], [classification_output])
# Snapshot the freshly initialized weights (restored before training below).
weights = model.get_weights()
In [143]:
# Show the layer topology and parameter counts.
model.summary()
Model: "model_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_5 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_14 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_15 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_16 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_17 (Dense)             (None, 1)                 21        
=================================================================
Total params: 1,141
Trainable params: 1,141
Non-trainable params: 0
_________________________________________________________________
In [144]:
# Restore the saved initial weights so training always starts from the same state.
model.set_weights(weights)

# Optimizer uses the learning rate tuned by the grid search.
# NOTE(review): `lr=` is the legacy keyword; newer Keras expects `learning_rate=`.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])

# Halve the learning rate whenever validation accuracy fails to improve by
# at least 0.01 over 10 epochs.
lr_schedule = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
)

history = model.fit(
    X_train, y_train,
    epochs=epochs,
    validation_data=(X_test, y_test),
    batch_size=32,
    callbacks=[lr_schedule],
)
Train on 164 samples, validate on 55 samples
Epoch 1/400
164/164 [==============================] - 0s 1ms/step - loss: 0.7149 - accuracy: 0.5244 - val_loss: 0.7183 - val_accuracy: 0.5273
Epoch 2/400
164/164 [==============================] - 0s 67us/step - loss: 0.6628 - accuracy: 0.5854 - val_loss: 0.7022 - val_accuracy: 0.6182
Epoch 3/400
164/164 [==============================] - 0s 73us/step - loss: 0.6490 - accuracy: 0.6707 - val_loss: 0.6958 - val_accuracy: 0.6364
Epoch 4/400
164/164 [==============================] - 0s 67us/step - loss: 0.6445 - accuracy: 0.6707 - val_loss: 0.6991 - val_accuracy: 0.6182
Epoch 5/400
164/164 [==============================] - 0s 73us/step - loss: 0.6399 - accuracy: 0.6646 - val_loss: 0.6941 - val_accuracy: 0.6182
Epoch 6/400
164/164 [==============================] - 0s 73us/step - loss: 0.6294 - accuracy: 0.6951 - val_loss: 0.6918 - val_accuracy: 0.6182
Epoch 7/400
164/164 [==============================] - 0s 104us/step - loss: 0.6196 - accuracy: 0.6951 - val_loss: 0.6923 - val_accuracy: 0.6182
Epoch 8/400
164/164 [==============================] - 0s 73us/step - loss: 0.6167 - accuracy: 0.7134 - val_loss: 0.6890 - val_accuracy: 0.6000
Epoch 9/400
164/164 [==============================] - 0s 67us/step - loss: 0.6139 - accuracy: 0.6829 - val_loss: 0.6929 - val_accuracy: 0.6000
Epoch 10/400
164/164 [==============================] - 0s 73us/step - loss: 0.6108 - accuracy: 0.6890 - val_loss: 0.6929 - val_accuracy: 0.6182
Epoch 11/400
164/164 [==============================] - 0s 79us/step - loss: 0.6044 - accuracy: 0.7012 - val_loss: 0.6875 - val_accuracy: 0.6000
Epoch 12/400
164/164 [==============================] - 0s 79us/step - loss: 0.5918 - accuracy: 0.7073 - val_loss: 0.6807 - val_accuracy: 0.6545
Epoch 13/400
164/164 [==============================] - 0s 67us/step - loss: 0.5862 - accuracy: 0.7073 - val_loss: 0.6789 - val_accuracy: 0.6545
Epoch 14/400
164/164 [==============================] - 0s 110us/step - loss: 0.5762 - accuracy: 0.6890 - val_loss: 0.6712 - val_accuracy: 0.6545
Epoch 15/400
164/164 [==============================] - 0s 73us/step - loss: 0.5665 - accuracy: 0.7073 - val_loss: 0.6710 - val_accuracy: 0.6727
Epoch 16/400
164/164 [==============================] - 0s 73us/step - loss: 0.5596 - accuracy: 0.7134 - val_loss: 0.6779 - val_accuracy: 0.6727
Epoch 17/400
164/164 [==============================] - 0s 73us/step - loss: 0.5537 - accuracy: 0.7012 - val_loss: 0.6888 - val_accuracy: 0.6727
Epoch 18/400
164/164 [==============================] - 0s 73us/step - loss: 0.5451 - accuracy: 0.7134 - val_loss: 0.6966 - val_accuracy: 0.6364
Epoch 19/400
164/164 [==============================] - 0s 79us/step - loss: 0.5349 - accuracy: 0.7256 - val_loss: 0.7024 - val_accuracy: 0.5818
Epoch 20/400
164/164 [==============================] - 0s 79us/step - loss: 0.5252 - accuracy: 0.7378 - val_loss: 0.7101 - val_accuracy: 0.6364
Epoch 21/400
164/164 [==============================] - 0s 104us/step - loss: 0.5172 - accuracy: 0.7256 - val_loss: 0.7167 - val_accuracy: 0.6182
Epoch 22/400
164/164 [==============================] - 0s 79us/step - loss: 0.5140 - accuracy: 0.7317 - val_loss: 0.7129 - val_accuracy: 0.6000
Epoch 23/400
164/164 [==============================] - 0s 73us/step - loss: 0.5070 - accuracy: 0.7378 - val_loss: 0.6954 - val_accuracy: 0.6000
Epoch 24/400
164/164 [==============================] - 0s 73us/step - loss: 0.4857 - accuracy: 0.7744 - val_loss: 0.6973 - val_accuracy: 0.5818
Epoch 25/400
164/164 [==============================] - 0s 79us/step - loss: 0.4784 - accuracy: 0.7927 - val_loss: 0.6920 - val_accuracy: 0.6182

Epoch 00025: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 26/400
164/164 [==============================] - 0s 73us/step - loss: 0.4691 - accuracy: 0.8110 - val_loss: 0.6896 - val_accuracy: 0.6364
Epoch 27/400
164/164 [==============================] - 0s 85us/step - loss: 0.4629 - accuracy: 0.8110 - val_loss: 0.6919 - val_accuracy: 0.6182
Epoch 28/400
164/164 [==============================] - 0s 85us/step - loss: 0.4541 - accuracy: 0.8110 - val_loss: 0.6785 - val_accuracy: 0.6364
Epoch 29/400
164/164 [==============================] - 0s 73us/step - loss: 0.4506 - accuracy: 0.8049 - val_loss: 0.6732 - val_accuracy: 0.6364
Epoch 30/400
164/164 [==============================] - 0s 79us/step - loss: 0.4403 - accuracy: 0.8171 - val_loss: 0.6752 - val_accuracy: 0.6182
Epoch 31/400
164/164 [==============================] - 0s 110us/step - loss: 0.4384 - accuracy: 0.8476 - val_loss: 0.6800 - val_accuracy: 0.6182
Epoch 32/400
164/164 [==============================] - 0s 79us/step - loss: 0.4345 - accuracy: 0.8537 - val_loss: 0.6872 - val_accuracy: 0.6182
Epoch 33/400
164/164 [==============================] - 0s 98us/step - loss: 0.4265 - accuracy: 0.8598 - val_loss: 0.6875 - val_accuracy: 0.6545
Epoch 34/400
164/164 [==============================] - 0s 79us/step - loss: 0.4183 - accuracy: 0.8476 - val_loss: 0.6883 - val_accuracy: 0.6727
Epoch 35/400
164/164 [==============================] - 0s 67us/step - loss: 0.4100 - accuracy: 0.8415 - val_loss: 0.6959 - val_accuracy: 0.6727

Epoch 00035: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 36/400
164/164 [==============================] - 0s 67us/step - loss: 0.4034 - accuracy: 0.8293 - val_loss: 0.6946 - val_accuracy: 0.7091
Epoch 37/400
164/164 [==============================] - 0s 67us/step - loss: 0.3991 - accuracy: 0.8293 - val_loss: 0.6968 - val_accuracy: 0.6909
Epoch 38/400
164/164 [==============================] - 0s 85us/step - loss: 0.3958 - accuracy: 0.8293 - val_loss: 0.6987 - val_accuracy: 0.6909
Epoch 39/400
164/164 [==============================] - 0s 97us/step - loss: 0.3918 - accuracy: 0.8354 - val_loss: 0.7003 - val_accuracy: 0.6909
Epoch 40/400
164/164 [==============================] - 0s 79us/step - loss: 0.3892 - accuracy: 0.8354 - val_loss: 0.7088 - val_accuracy: 0.6545
Epoch 41/400
164/164 [==============================] - 0s 73us/step - loss: 0.3852 - accuracy: 0.8354 - val_loss: 0.7138 - val_accuracy: 0.6545
Epoch 42/400
164/164 [==============================] - 0s 79us/step - loss: 0.3819 - accuracy: 0.8476 - val_loss: 0.7122 - val_accuracy: 0.6909
Epoch 43/400
164/164 [==============================] - 0s 79us/step - loss: 0.3787 - accuracy: 0.8598 - val_loss: 0.7089 - val_accuracy: 0.6909
Epoch 44/400
164/164 [==============================] - 0s 73us/step - loss: 0.3763 - accuracy: 0.8598 - val_loss: 0.7081 - val_accuracy: 0.6909
Epoch 45/400
164/164 [==============================] - 0s 73us/step - loss: 0.3726 - accuracy: 0.8598 - val_loss: 0.7091 - val_accuracy: 0.6727
Epoch 46/400
164/164 [==============================] - 0s 73us/step - loss: 0.3678 - accuracy: 0.8598 - val_loss: 0.7040 - val_accuracy: 0.6727

Epoch 00046: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 47/400
164/164 [==============================] - 0s 98us/step - loss: 0.3655 - accuracy: 0.8598 - val_loss: 0.7071 - val_accuracy: 0.6727
Epoch 48/400
164/164 [==============================] - 0s 85us/step - loss: 0.3630 - accuracy: 0.8598 - val_loss: 0.7106 - val_accuracy: 0.6545
Epoch 49/400
164/164 [==============================] - 0s 73us/step - loss: 0.3608 - accuracy: 0.8598 - val_loss: 0.7129 - val_accuracy: 0.6545
Epoch 50/400
164/164 [==============================] - 0s 79us/step - loss: 0.3591 - accuracy: 0.8659 - val_loss: 0.7160 - val_accuracy: 0.6364
Epoch 51/400
164/164 [==============================] - 0s 73us/step - loss: 0.3573 - accuracy: 0.8659 - val_loss: 0.7198 - val_accuracy: 0.6364
Epoch 52/400
164/164 [==============================] - 0s 79us/step - loss: 0.3555 - accuracy: 0.8659 - val_loss: 0.7224 - val_accuracy: 0.6545
Epoch 53/400
164/164 [==============================] - 0s 73us/step - loss: 0.3539 - accuracy: 0.8659 - val_loss: 0.7248 - val_accuracy: 0.6364
Epoch 54/400
164/164 [==============================] - 0s 85us/step - loss: 0.3524 - accuracy: 0.8659 - val_loss: 0.7281 - val_accuracy: 0.6364
Epoch 55/400
164/164 [==============================] - 0s 98us/step - loss: 0.3514 - accuracy: 0.8902 - val_loss: 0.7324 - val_accuracy: 0.6364
Epoch 56/400
164/164 [==============================] - 0s 79us/step - loss: 0.3504 - accuracy: 0.8841 - val_loss: 0.7356 - val_accuracy: 0.6364

Epoch 00056: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 57/400
164/164 [==============================] - 0s 73us/step - loss: 0.3483 - accuracy: 0.8902 - val_loss: 0.7344 - val_accuracy: 0.6364
Epoch 58/400
164/164 [==============================] - 0s 79us/step - loss: 0.3472 - accuracy: 0.8902 - val_loss: 0.7329 - val_accuracy: 0.6364
Epoch 59/400
164/164 [==============================] - 0s 79us/step - loss: 0.3463 - accuracy: 0.8963 - val_loss: 0.7335 - val_accuracy: 0.6364
Epoch 60/400
164/164 [==============================] - 0s 73us/step - loss: 0.3449 - accuracy: 0.8963 - val_loss: 0.7348 - val_accuracy: 0.6364
Epoch 61/400
164/164 [==============================] - 0s 73us/step - loss: 0.3439 - accuracy: 0.8963 - val_loss: 0.7356 - val_accuracy: 0.6364
Epoch 62/400
164/164 [==============================] - 0s 73us/step - loss: 0.3427 - accuracy: 0.8902 - val_loss: 0.7366 - val_accuracy: 0.6364
Epoch 63/400
164/164 [==============================] - 0s 98us/step - loss: 0.3420 - accuracy: 0.8902 - val_loss: 0.7351 - val_accuracy: 0.6364
Epoch 64/400
164/164 [==============================] - 0s 79us/step - loss: 0.3409 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 65/400
164/164 [==============================] - 0s 73us/step - loss: 0.3398 - accuracy: 0.8780 - val_loss: 0.7364 - val_accuracy: 0.6364
Epoch 66/400
164/164 [==============================] - 0s 73us/step - loss: 0.3389 - accuracy: 0.8780 - val_loss: 0.7359 - val_accuracy: 0.6545

Epoch 00066: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 67/400
164/164 [==============================] - 0s 128us/step - loss: 0.3380 - accuracy: 0.8841 - val_loss: 0.7359 - val_accuracy: 0.6545
Epoch 68/400
164/164 [==============================] - 0s 79us/step - loss: 0.3373 - accuracy: 0.8841 - val_loss: 0.7361 - val_accuracy: 0.6364
Epoch 69/400
164/164 [==============================] - 0s 79us/step - loss: 0.3372 - accuracy: 0.8902 - val_loss: 0.7360 - val_accuracy: 0.6364
Epoch 70/400
164/164 [==============================] - 0s 73us/step - loss: 0.3364 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 71/400
164/164 [==============================] - 0s 79us/step - loss: 0.3359 - accuracy: 0.8902 - val_loss: 0.7358 - val_accuracy: 0.6364
Epoch 72/400
164/164 [==============================] - 0s 91us/step - loss: 0.3356 - accuracy: 0.8841 - val_loss: 0.7359 - val_accuracy: 0.6545
Epoch 73/400
164/164 [==============================] - 0s 85us/step - loss: 0.3349 - accuracy: 0.8841 - val_loss: 0.7369 - val_accuracy: 0.6545
Epoch 74/400
164/164 [==============================] - 0s 67us/step - loss: 0.3343 - accuracy: 0.8841 - val_loss: 0.7377 - val_accuracy: 0.6545
Epoch 75/400
164/164 [==============================] - 0s 85us/step - loss: 0.3341 - accuracy: 0.8780 - val_loss: 0.7388 - val_accuracy: 0.6364
Epoch 76/400
164/164 [==============================] - 0s 85us/step - loss: 0.3336 - accuracy: 0.8780 - val_loss: 0.7396 - val_accuracy: 0.6364

Epoch 00076: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 77/400
164/164 [==============================] - 0s 73us/step - loss: 0.3332 - accuracy: 0.8841 - val_loss: 0.7399 - val_accuracy: 0.6364
Epoch 78/400
164/164 [==============================] - 0s 73us/step - loss: 0.3331 - accuracy: 0.8841 - val_loss: 0.7402 - val_accuracy: 0.6364
Epoch 79/400
164/164 [==============================] - 0s 73us/step - loss: 0.3328 - accuracy: 0.8841 - val_loss: 0.7402 - val_accuracy: 0.6364
Epoch 80/400
164/164 [==============================] - 0s 73us/step - loss: 0.3326 - accuracy: 0.8841 - val_loss: 0.7405 - val_accuracy: 0.6364
Epoch 81/400
164/164 [==============================] - 0s 73us/step - loss: 0.3324 - accuracy: 0.8841 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 82/400
164/164 [==============================] - 0s 79us/step - loss: 0.3322 - accuracy: 0.8841 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 83/400
164/164 [==============================] - 0s 79us/step - loss: 0.3319 - accuracy: 0.8902 - val_loss: 0.7408 - val_accuracy: 0.6364
Epoch 84/400
164/164 [==============================] - 0s 91us/step - loss: 0.3317 - accuracy: 0.8902 - val_loss: 0.7409 - val_accuracy: 0.6364
Epoch 85/400
164/164 [==============================] - 0s 79us/step - loss: 0.3315 - accuracy: 0.8902 - val_loss: 0.7411 - val_accuracy: 0.6364
Epoch 86/400
164/164 [==============================] - 0s 73us/step - loss: 0.3312 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364

Epoch 00086: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 87/400
164/164 [==============================] - 0s 79us/step - loss: 0.3310 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 88/400
164/164 [==============================] - 0s 104us/step - loss: 0.3309 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 89/400
164/164 [==============================] - 0s 104us/step - loss: 0.3307 - accuracy: 0.8902 - val_loss: 0.7412 - val_accuracy: 0.6364
Epoch 90/400
164/164 [==============================] - 0s 91us/step - loss: 0.3306 - accuracy: 0.8902 - val_loss: 0.7413 - val_accuracy: 0.6364
Epoch 91/400
164/164 [==============================] - 0s 98us/step - loss: 0.3305 - accuracy: 0.8902 - val_loss: 0.7415 - val_accuracy: 0.6364
Epoch 92/400
164/164 [==============================] - 0s 73us/step - loss: 0.3305 - accuracy: 0.8963 - val_loss: 0.7418 - val_accuracy: 0.6364
Epoch 93/400
164/164 [==============================] - 0s 79us/step - loss: 0.3304 - accuracy: 0.8963 - val_loss: 0.7419 - val_accuracy: 0.6364
Epoch 94/400
164/164 [==============================] - 0s 79us/step - loss: 0.3302 - accuracy: 0.8963 - val_loss: 0.7420 - val_accuracy: 0.6364
Epoch 95/400
164/164 [==============================] - 0s 91us/step - loss: 0.3301 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6364
Epoch 96/400
164/164 [==============================] - 0s 79us/step - loss: 0.3300 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545

Epoch 00096: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 97/400
164/164 [==============================] - 0s 79us/step - loss: 0.3299 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 98/400
164/164 [==============================] - 0s 79us/step - loss: 0.3298 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 99/400
164/164 [==============================] - 0s 91us/step - loss: 0.3298 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 100/400
164/164 [==============================] - 0s 79us/step - loss: 0.3297 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 101/400
164/164 [==============================] - 0s 79us/step - loss: 0.3297 - accuracy: 0.8963 - val_loss: 0.7422 - val_accuracy: 0.6545
Epoch 102/400
164/164 [==============================] - 0s 79us/step - loss: 0.3296 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 103/400
164/164 [==============================] - 0s 73us/step - loss: 0.3296 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 104/400
164/164 [==============================] - 0s 97us/step - loss: 0.3295 - accuracy: 0.8963 - val_loss: 0.7423 - val_accuracy: 0.6545
Epoch 105/400
164/164 [==============================] - 0s 79us/step - loss: 0.3295 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 106/400
164/164 [==============================] - 0s 67us/step - loss: 0.3294 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545

Epoch 00106: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 107/400
164/164 [==============================] - 0s 91us/step - loss: 0.3294 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 108/400
164/164 [==============================] - 0s 85us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7424 - val_accuracy: 0.6545
Epoch 109/400
164/164 [==============================] - 0s 79us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 110/400
164/164 [==============================] - 0s 79us/step - loss: 0.3293 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 111/400
164/164 [==============================] - 0s 91us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 112/400
164/164 [==============================] - 0s 79us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 113/400
164/164 [==============================] - 0s 79us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 114/400
164/164 [==============================] - 0s 85us/step - loss: 0.3292 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 115/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 116/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545

Epoch 00116: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 117/400
164/164 [==============================] - 0s 73us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 118/400
164/164 [==============================] - 0s 85us/step - loss: 0.3291 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 119/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 120/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 121/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 122/400
164/164 [==============================] - 0s 98us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 123/400
164/164 [==============================] - 0s 79us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 124/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 125/400
164/164 [==============================] - 0s 73us/step - loss: 0.3290 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 126/400
164/164 [==============================] - 0s 104us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545

Epoch 00126: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 127/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 128/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 129/400
164/164 [==============================] - 0s 79us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 130/400
164/164 [==============================] - 0s 134us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 131/400
164/164 [==============================] - 0s 98us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 132/400
164/164 [==============================] - 0s 85us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7425 - val_accuracy: 0.6545
Epoch 133/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 134/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 135/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 136/400
164/164 [==============================] - 0s 104us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00136: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 137/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 138/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 139/400
164/164 [==============================] - 0s 73us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 140/400
164/164 [==============================] - 0s 116us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 141/400
164/164 [==============================] - 0s 91us/step - loss: 0.3289 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 142/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 143/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 144/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 145/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 146/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00146: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 147/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 148/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 149/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 150/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 151/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 152/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 153/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 154/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 155/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 156/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00156: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 157/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 158/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 159/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 160/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 161/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 162/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 163/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 164/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 165/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 166/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00166: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 167/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 168/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 169/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 170/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 171/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 172/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 173/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 174/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 175/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 176/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00176: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 177/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 178/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 179/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 180/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 181/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 182/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 183/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 184/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 185/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 186/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00186: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 187/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 188/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 189/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 190/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 191/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 192/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 193/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 194/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 195/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 196/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00196: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 197/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 198/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 199/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 200/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 201/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 202/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 203/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 204/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 205/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 206/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00206: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 207/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 208/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 209/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 210/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 211/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 212/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 213/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 214/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 215/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 216/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00216: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 217/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 218/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 219/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 220/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 221/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 222/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 223/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 224/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 225/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 226/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00226: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 227/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 228/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 229/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 230/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 231/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 232/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 233/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 234/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 235/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 236/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00236: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 237/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 238/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 239/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 240/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 241/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 242/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 243/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 244/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 245/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 246/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00246: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 247/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 248/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 249/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 250/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 251/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 252/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 253/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 254/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 255/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 256/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00256: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 257/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 258/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 259/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 260/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 261/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 262/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 263/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 264/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 265/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 266/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00266: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 267/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 268/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 269/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 270/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 271/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 272/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 273/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 274/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 275/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 276/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00276: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 277/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 278/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 279/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 280/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 281/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 282/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 283/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 284/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 285/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 286/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00286: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 287/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 288/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 289/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 290/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 291/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 292/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 293/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 294/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 295/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 296/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00296: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 297/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 298/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 299/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 300/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 301/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 302/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 303/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 304/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 305/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 306/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00306: ReduceLROnPlateau reducing learning rate to 5.5879354962651284e-12.
Epoch 307/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 308/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 309/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 310/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 311/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 312/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 313/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 314/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 315/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 316/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00316: ReduceLROnPlateau reducing learning rate to 2.7939677481325642e-12.
Epoch 317/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 318/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 319/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 320/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 321/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 322/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 323/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 324/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 325/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 326/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00326: ReduceLROnPlateau reducing learning rate to 1.3969838740662821e-12.
Epoch 327/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 328/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 329/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 330/400
164/164 [==============================] - 0s 67us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 331/400
164/164 [==============================] - ETA: 0s - loss: 0.3229 - accuracy: 0.90 - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 332/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 333/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 334/400
164/164 [==============================] - 0s 91us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 335/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 336/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00336: ReduceLROnPlateau reducing learning rate to 6.984919370331411e-13.
Epoch 337/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 338/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 339/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 340/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 341/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 342/400
164/164 [==============================] - 0s 85us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 343/400
164/164 [==============================] - 0s 79us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 344/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 345/400
164/164 [==============================] - 0s 73us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 346/400
164/164 [==============================] - 0s 97us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00346: ReduceLROnPlateau reducing learning rate to 3.4924596851657053e-13.
Epoch 347/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 348/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 349/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 350/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 351/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 352/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 353/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 354/400
164/164 [==============================] - 0s 158us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 355/400
164/164 [==============================] - 0s 140us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 356/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00356: ReduceLROnPlateau reducing learning rate to 1.7462298425828526e-13.
Epoch 357/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 358/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 359/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 360/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 361/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 362/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 363/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 364/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 365/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 366/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00366: ReduceLROnPlateau reducing learning rate to 8.731149212914263e-14.
Epoch 367/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 368/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 369/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 370/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 371/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 372/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 373/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 374/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 375/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 376/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00376: ReduceLROnPlateau reducing learning rate to 4.3655746064571316e-14.
Epoch 377/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 378/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 379/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 380/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 381/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 382/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 383/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 384/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 385/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 386/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00386: ReduceLROnPlateau reducing learning rate to 2.1827873032285658e-14.
Epoch 387/400
164/164 [==============================] - 0s 134us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 388/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 389/400
164/164 [==============================] - 0s 110us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 390/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 391/400
164/164 [==============================] - 0s 128us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 392/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 393/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 394/400
164/164 [==============================] - 0s 140us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 395/400
164/164 [==============================] - 0s 122us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 396/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545

Epoch 00396: ReduceLROnPlateau reducing learning rate to 1.0913936516142829e-14.
Epoch 397/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 398/400
164/164 [==============================] - 0s 116us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 399/400
164/164 [==============================] - 0s 98us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
Epoch 400/400
164/164 [==============================] - 0s 104us/step - loss: 0.3288 - accuracy: 0.8963 - val_loss: 0.7426 - val_accuracy: 0.6545
In [145]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x value per training epoch.
epochs = range(len(acc))

def plot_history_curves(train_values, val_values, label, metric_name):
    """Plot training vs. validation curves for one metric over all epochs."""
    plt.plot(epochs, train_values, 'bo', label='Training ' + label)
    plt.plot(epochs, val_values, 'b', label='Validation ' + label)
    plt.title('Training and validation ' + metric_name)
    plt.xlabel('Epoch')
    plt.legend()
    plt.show()

# Same two figures as before; the stray debug `print(epochs)` (which only
# printed `range(0, 400)`) has been removed, and the duplicated plotting
# code is factored into one helper.
plot_history_curves(acc, val_acc, 'acc', 'accuracy')
plot_history_curves(loss, val_loss, 'loss', 'loss')
range(0, 400)
In [146]:
# Evaluate the trained network on the held-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
55/55 [==============================] - 0s 55us/step
test loss: 0.7425676215778697, test accuracy: 0.6545454263687134
In [147]:
# Class-probability scores for the test split; AUC uses the raw scores,
# not thresholded labels.
y_pred = model.predict(X_test)
auc_value = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc_value)
AUC ROC:  0.6399456521739131
In [148]:
# Threshold the probabilities at 0.5 to get hard 0/1 labels, then
# score agreement beyond chance with Cohen's kappa.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.2943956785955435

KMeans

In [149]:
X
Out[149]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812
... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010

219 rows × 13 columns

In [150]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14,
# each fit with the same random_state for reproducibility.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[150]:
[2847.0,
 2572.5760570812117,
 2370.209947155015,
 2235.6129406180157,
 2112.951551625758,
 2041.1809211260454,
 1982.3615393500422,
 1899.0667595696164,
 1851.9267246215204,
 1760.4468946465518,
 1745.79714786859,
 1689.1350809615656,
 1657.4940102564742,
 1625.370413913055]
In [151]:
# Elbow plot of the within-cluster sums of squares computed above.
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs)
Out[151]:
[<matplotlib.lines.Line2D at 0x1e82d2ffda0>]

K=3

In [152]:
# Fit the final clustering with the elbow-selected k = 3.
kmeans_ch = KMeans(n_clusters=3, random_state=0, n_init=10).fit(X)
kmeans_ch
Out[152]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [153]:
# Hard cluster assignment (0, 1 or 2) learned for each row of X.
kmeans_ch.labels_
Out[153]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [154]:
# For the same data the model was fitted on, predict() returns the same
# assignments already available in kmeans_ch.labels_ (compare the two
# outputs) — kept as a named variable for the column assignment below.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[154]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [155]:
# NOTE(review): this mutates X in place — after this cell X is no longer
# a pure feature matrix (it gains 'Cluster' and 'chosen' columns), so
# re-running earlier modelling cells against X would silently use 15
# columns instead of 13.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [156]:
X
Out[156]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392 1 0
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458 2 0
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846 1 0
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081 1 0
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578 0 0
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711 0 0
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956 0 0
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966 1 0
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166 0 0
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942 0 0
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713 0 0
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727 0 0
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316 0 0
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062 0 0
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157 0 0
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920 0 0
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932 0 0
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350 0 0
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315 1 0
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227 2 0
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971 0 0
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975 2 0
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688 1 0
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948 2 0
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393 1 0
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662 0 0
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212 0 0
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400 0 0
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702 0 0
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507 0 1
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752 1 1
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524 2 1
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026 1 1
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252 1 1
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356 1 1
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197 2 1
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262 1 1
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841 1 1
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548 1 1
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516 1 1
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480 1 1
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728 1 1
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491 2 1
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982 2 1
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207 2 1
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325 1 1
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507 2 1
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993 0 1
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011 1 1
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564 1 1
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298 1 1
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869 2 1
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017 2 1
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484 2 1
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139 0 1
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225 0 1
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652 1 1
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456 0 1
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010 1 1

219 rows × 15 columns

In [157]:
# Count songs per (chosen, cluster) pair, reshape to one row per cluster,
# and draw the composition of each cluster as a stacked bar chart.
stacked = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df[[0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[157]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82d379160>
In [158]:
# Render a markdown section header with the sixth company's name
# ("Urban Place", per the rendered output below).
# NOTE(review): Latex is imported but never used in this cell; imports
# mid-notebook also break a clean top-down import structure.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [159]:
# Feature matrix for company index 5 — presumably the standardized
# ("std") per-song features; verify against where df_n_ps_std_ch is
# built.  NOTE(review): reuses the name X from the previous company's
# analysis, so earlier cells become stale after this point.
X = df_n_ps_std_ch[5]
In [160]:
# Binary target: the 'chosen' flag (0/1, as seen in the tables above).
y = df_n_ps[5]['chosen']
In [161]:
# Fixed random_state so the split — and every metric computed from it —
# is reproducible across kernel restarts (the split was previously
# unseeded, making all downstream numbers non-reproducible).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [162]:
# Sanity check on the split: (rows, feature columns) of the training set.
X_train.shape
Out[162]:
(168, 13)
In [163]:
# Base estimator for the grid search; this hidden_layer_sizes value is a
# placeholder — the parameter grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [164]:
# Candidate hyperparameter values for the grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Architectures: one to three hidden layers of 10-30 units each.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but currently excluded from the parameter grid (it is
# commented out in `parametros` below).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [165]:
import time
start = time.time() # Current time in seconds since the Unix epoch (1 Jan 1970), used as a reference point.

# Seed NumPy's global RNG so the search is repeatable.
np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy per fold; refit the final model
# on the accuracy-best parameter combination.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` is deprecated and was removed in scikit-learn 0.24;
# drop this argument when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [ ]:
# Run the exhaustive grid search (the long-running cell of this section).
grid.fit(X_train, y_train)

# Report the winning parameters, their mean CV accuracy, and the mean CV
# kappa of the same parameter combination (message text is Spanish).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time taken right after the search finishes.
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [51]:
# Translate the grid-search winner into the pieces the Keras model needs:
# input width, layer sizes (hidden layers plus one output unit), learning
# rate, and epoch count.
n0 = X_train.shape[1]
best = grid.best_params_
### hidden_layer_sizes
ns = list(best['hidden_layer_sizes']) + [1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [52]:
# Functional-API input layer sized to the n0 feature columns.
input_tensor = Input(shape = (n0,))
In [53]:
# Chain the hidden layers: each tanh Dense layer consumes the output of
# the previous one, starting from the input tensor.
hidden_outputs = [input_tensor]
for layer_size in ns[:-1]:
    hidden_outputs.append(Dense(layer_size, activation = 'tanh')(hidden_outputs[-1]))

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [54]:
# Build the model and snapshot its freshly initialized weights; they are
# restored via set_weights() before training in a later cell.
model = Model(inputs=[input_tensor], outputs=[classification_output])
weights = model.get_weights()
In [55]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_4 (Dense)              (None, 20)                280       
_________________________________________________________________
dense_5 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_6 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_7 (Dense)              (None, 1)                 21        
=================================================================
Total params: 1,141
Trainable params: 1,141
Non-trainable params: 0
_________________________________________________________________
In [56]:
# Restore the pre-training weight snapshot so training starts from the
# same initialization every run, then train with Adam at the
# grid-searched learning rate.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# NOTE(review): the test split doubles as the validation set here, so the
# reported val_* metrics are not an unbiased estimate of test performance.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1,
                    # Floor the decay: without min_lr the schedule kept halving
                    # the LR down to ~1e-14 (see the training logs above),
                    # spending callback churn on updates that change nothing.
                    min_lr=1e-6
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/2000
191/191 [==============================] - 2s 10ms/step - loss: 0.6523 - accuracy: 0.5864 - val_loss: 0.5471 - val_accuracy: 0.7031
Epoch 2/2000
191/191 [==============================] - 0s 89us/step - loss: 0.4800 - accuracy: 0.7696 - val_loss: 0.5294 - val_accuracy: 0.7500
Epoch 3/2000
191/191 [==============================] - 0s 68us/step - loss: 0.4421 - accuracy: 0.8010 - val_loss: 0.5305 - val_accuracy: 0.7812
Epoch 4/2000
191/191 [==============================] - 0s 63us/step - loss: 0.4243 - accuracy: 0.8168 - val_loss: 0.5390 - val_accuracy: 0.7500
Epoch 5/2000
191/191 [==============================] - 0s 73us/step - loss: 0.4080 - accuracy: 0.8168 - val_loss: 0.5368 - val_accuracy: 0.7500
Epoch 6/2000
191/191 [==============================] - 0s 58us/step - loss: 0.3924 - accuracy: 0.8168 - val_loss: 0.5567 - val_accuracy: 0.7188
Epoch 7/2000
191/191 [==============================] - 0s 89us/step - loss: 0.3782 - accuracy: 0.8168 - val_loss: 0.5325 - val_accuracy: 0.7500
Epoch 8/2000
191/191 [==============================] - 0s 63us/step - loss: 0.3562 - accuracy: 0.8429 - val_loss: 0.5605 - val_accuracy: 0.7500
Epoch 9/2000
191/191 [==============================] - 0s 63us/step - loss: 0.3462 - accuracy: 0.8586 - val_loss: 0.5968 - val_accuracy: 0.7188
Epoch 10/2000
191/191 [==============================] - 0s 89us/step - loss: 0.3171 - accuracy: 0.8639 - val_loss: 0.5665 - val_accuracy: 0.7500
Epoch 11/2000
191/191 [==============================] - 0s 68us/step - loss: 0.3107 - accuracy: 0.8639 - val_loss: 0.5456 - val_accuracy: 0.7656
Epoch 12/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2855 - accuracy: 0.8743 - val_loss: 0.6107 - val_accuracy: 0.7500
Epoch 13/2000
191/191 [==============================] - 0s 58us/step - loss: 0.2626 - accuracy: 0.8848 - val_loss: 0.6076 - val_accuracy: 0.7656

Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 14/2000
191/191 [==============================] - 0s 94us/step - loss: 0.2396 - accuracy: 0.9215 - val_loss: 0.5932 - val_accuracy: 0.7656
Epoch 15/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2269 - accuracy: 0.9162 - val_loss: 0.5980 - val_accuracy: 0.7812
Epoch 16/2000
191/191 [==============================] - 0s 63us/step - loss: 0.2133 - accuracy: 0.9372 - val_loss: 0.6289 - val_accuracy: 0.7656
Epoch 17/2000
191/191 [==============================] - 0s 68us/step - loss: 0.2023 - accuracy: 0.9372 - val_loss: 0.6384 - val_accuracy: 0.7656
Epoch 18/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1904 - accuracy: 0.9424 - val_loss: 0.6197 - val_accuracy: 0.7656
Epoch 19/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1817 - accuracy: 0.9372 - val_loss: 0.6255 - val_accuracy: 0.7656
Epoch 20/2000
191/191 [==============================] - 0s 68us/step - loss: 0.1725 - accuracy: 0.9424 - val_loss: 0.6415 - val_accuracy: 0.7500
Epoch 21/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1603 - accuracy: 0.9424 - val_loss: 0.6400 - val_accuracy: 0.7656
Epoch 22/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1473 - accuracy: 0.9581 - val_loss: 0.6420 - val_accuracy: 0.7500
Epoch 23/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1367 - accuracy: 0.9738 - val_loss: 0.6448 - val_accuracy: 0.7656

Epoch 00023: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 24/2000
191/191 [==============================] - 0s 68us/step - loss: 0.1248 - accuracy: 0.9791 - val_loss: 0.6578 - val_accuracy: 0.7812
Epoch 25/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1211 - accuracy: 0.9738 - val_loss: 0.6633 - val_accuracy: 0.7812
Epoch 26/2000
191/191 [==============================] - 0s 58us/step - loss: 0.1166 - accuracy: 0.9791 - val_loss: 0.6651 - val_accuracy: 0.7656
Epoch 27/2000
191/191 [==============================] - 0s 110us/step - loss: 0.1121 - accuracy: 0.9791 - val_loss: 0.6655 - val_accuracy: 0.7500
Epoch 28/2000
191/191 [==============================] - 0s 73us/step - loss: 0.1069 - accuracy: 0.9791 - val_loss: 0.6765 - val_accuracy: 0.7656
Epoch 29/2000
191/191 [==============================] - 0s 63us/step - loss: 0.1026 - accuracy: 0.9791 - val_loss: 0.6865 - val_accuracy: 0.7656
Epoch 30/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0981 - accuracy: 0.9843 - val_loss: 0.6846 - val_accuracy: 0.7656
Epoch 31/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0922 - accuracy: 0.9843 - val_loss: 0.6932 - val_accuracy: 0.7656
Epoch 32/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0883 - accuracy: 0.9843 - val_loss: 0.7066 - val_accuracy: 0.7656
Epoch 33/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0829 - accuracy: 0.9895 - val_loss: 0.7111 - val_accuracy: 0.7656

Epoch 00033: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 34/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0790 - accuracy: 0.9895 - val_loss: 0.7178 - val_accuracy: 0.7656
Epoch 35/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0767 - accuracy: 0.9895 - val_loss: 0.7203 - val_accuracy: 0.7656
Epoch 36/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0752 - accuracy: 0.9895 - val_loss: 0.7267 - val_accuracy: 0.7656
Epoch 37/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0729 - accuracy: 0.9895 - val_loss: 0.7269 - val_accuracy: 0.7656
Epoch 38/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0705 - accuracy: 0.9895 - val_loss: 0.7364 - val_accuracy: 0.7656
Epoch 39/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0686 - accuracy: 0.9895 - val_loss: 0.7434 - val_accuracy: 0.7656
Epoch 40/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0668 - accuracy: 0.9895 - val_loss: 0.7461 - val_accuracy: 0.7812
Epoch 41/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0648 - accuracy: 0.9895 - val_loss: 0.7476 - val_accuracy: 0.7812
Epoch 42/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0631 - accuracy: 0.9895 - val_loss: 0.7577 - val_accuracy: 0.7812
Epoch 43/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0610 - accuracy: 0.9895 - val_loss: 0.7680 - val_accuracy: 0.7812

Epoch 00043: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 44/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0592 - accuracy: 0.9895 - val_loss: 0.7672 - val_accuracy: 0.7812
Epoch 45/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0582 - accuracy: 0.9895 - val_loss: 0.7674 - val_accuracy: 0.7812
Epoch 46/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0576 - accuracy: 0.9895 - val_loss: 0.7688 - val_accuracy: 0.7812
Epoch 47/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0566 - accuracy: 0.9895 - val_loss: 0.7700 - val_accuracy: 0.7812
Epoch 48/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0557 - accuracy: 0.9895 - val_loss: 0.7748 - val_accuracy: 0.7812
Epoch 49/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0547 - accuracy: 0.9895 - val_loss: 0.7777 - val_accuracy: 0.7812
Epoch 50/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0540 - accuracy: 0.9948 - val_loss: 0.7821 - val_accuracy: 0.7812
Epoch 51/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0533 - accuracy: 0.9948 - val_loss: 0.7849 - val_accuracy: 0.7812
Epoch 52/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0525 - accuracy: 0.9948 - val_loss: 0.7842 - val_accuracy: 0.7812
Epoch 53/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0515 - accuracy: 0.9948 - val_loss: 0.7869 - val_accuracy: 0.7812

Epoch 00053: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 54/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0509 - accuracy: 0.9948 - val_loss: 0.7898 - val_accuracy: 0.7812
Epoch 55/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0504 - accuracy: 0.9948 - val_loss: 0.7899 - val_accuracy: 0.7812
Epoch 56/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0500 - accuracy: 0.9948 - val_loss: 0.7926 - val_accuracy: 0.7812
Epoch 57/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0496 - accuracy: 0.9948 - val_loss: 0.7942 - val_accuracy: 0.7812
Epoch 58/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0492 - accuracy: 0.9948 - val_loss: 0.7951 - val_accuracy: 0.7812
Epoch 59/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0487 - accuracy: 0.9948 - val_loss: 0.7963 - val_accuracy: 0.7812
Epoch 60/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0484 - accuracy: 0.9948 - val_loss: 0.7975 - val_accuracy: 0.7812
Epoch 61/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0480 - accuracy: 0.9948 - val_loss: 0.7996 - val_accuracy: 0.7812
Epoch 62/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0476 - accuracy: 0.9948 - val_loss: 0.8015 - val_accuracy: 0.7812
Epoch 63/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0472 - accuracy: 0.9948 - val_loss: 0.8032 - val_accuracy: 0.7656

Epoch 00063: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 64/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0468 - accuracy: 0.9948 - val_loss: 0.8047 - val_accuracy: 0.7656
Epoch 65/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0466 - accuracy: 0.9948 - val_loss: 0.8048 - val_accuracy: 0.7656
Epoch 66/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0464 - accuracy: 0.9948 - val_loss: 0.8054 - val_accuracy: 0.7656
Epoch 67/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0462 - accuracy: 0.9948 - val_loss: 0.8063 - val_accuracy: 0.7656
Epoch 68/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0461 - accuracy: 0.9948 - val_loss: 0.8067 - val_accuracy: 0.7656
Epoch 69/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0459 - accuracy: 0.9948 - val_loss: 0.8076 - val_accuracy: 0.7656
Epoch 70/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0456 - accuracy: 0.9948 - val_loss: 0.8089 - val_accuracy: 0.7656
Epoch 71/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0455 - accuracy: 0.9948 - val_loss: 0.8099 - val_accuracy: 0.7656
Epoch 72/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0452 - accuracy: 0.9948 - val_loss: 0.8107 - val_accuracy: 0.7656
Epoch 73/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0451 - accuracy: 0.9948 - val_loss: 0.8106 - val_accuracy: 0.7656

Epoch 00073: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 74/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0449 - accuracy: 0.9948 - val_loss: 0.8107 - val_accuracy: 0.7656
Epoch 75/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0448 - accuracy: 0.9948 - val_loss: 0.8113 - val_accuracy: 0.7656
Epoch 76/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0447 - accuracy: 0.9948 - val_loss: 0.8118 - val_accuracy: 0.7656
Epoch 77/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0446 - accuracy: 0.9948 - val_loss: 0.8122 - val_accuracy: 0.7656
Epoch 78/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0445 - accuracy: 0.9948 - val_loss: 0.8128 - val_accuracy: 0.7656
Epoch 79/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0444 - accuracy: 0.9948 - val_loss: 0.8131 - val_accuracy: 0.7656
Epoch 80/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0443 - accuracy: 0.9948 - val_loss: 0.8137 - val_accuracy: 0.7656
Epoch 81/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0442 - accuracy: 0.9948 - val_loss: 0.8142 - val_accuracy: 0.7656
Epoch 82/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0441 - accuracy: 0.9948 - val_loss: 0.8146 - val_accuracy: 0.7656
Epoch 83/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0440 - accuracy: 0.9948 - val_loss: 0.8150 - val_accuracy: 0.7656

Epoch 00083: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 84/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0439 - accuracy: 0.9948 - val_loss: 0.8152 - val_accuracy: 0.7656
Epoch 85/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0439 - accuracy: 0.9948 - val_loss: 0.8155 - val_accuracy: 0.7656
Epoch 86/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0438 - accuracy: 0.9948 - val_loss: 0.8155 - val_accuracy: 0.7656
Epoch 87/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0438 - accuracy: 0.9948 - val_loss: 0.8159 - val_accuracy: 0.7656
Epoch 88/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0437 - accuracy: 0.9948 - val_loss: 0.8161 - val_accuracy: 0.7656
Epoch 89/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0437 - accuracy: 0.9948 - val_loss: 0.8162 - val_accuracy: 0.7656
Epoch 90/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0436 - accuracy: 0.9948 - val_loss: 0.8164 - val_accuracy: 0.7656
Epoch 91/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0436 - accuracy: 0.9948 - val_loss: 0.8166 - val_accuracy: 0.7656
Epoch 92/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0435 - accuracy: 0.9948 - val_loss: 0.8170 - val_accuracy: 0.7656
Epoch 93/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0435 - accuracy: 0.9948 - val_loss: 0.8171 - val_accuracy: 0.7656

Epoch 00093: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 94/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8172 - val_accuracy: 0.7656
Epoch 95/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8174 - val_accuracy: 0.7656
Epoch 96/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0434 - accuracy: 0.9948 - val_loss: 0.8175 - val_accuracy: 0.7656
Epoch 97/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8174 - val_accuracy: 0.7656
Epoch 98/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8176 - val_accuracy: 0.7656
Epoch 99/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8177 - val_accuracy: 0.7656
Epoch 100/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0433 - accuracy: 0.9948 - val_loss: 0.8179 - val_accuracy: 0.7656
Epoch 101/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8180 - val_accuracy: 0.7656
Epoch 102/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8180 - val_accuracy: 0.7656
Epoch 103/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8182 - val_accuracy: 0.7656

Epoch 00103: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 104/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0432 - accuracy: 0.9948 - val_loss: 0.8182 - val_accuracy: 0.7656
Epoch 105/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8183 - val_accuracy: 0.7656
Epoch 106/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8183 - val_accuracy: 0.7656
Epoch 107/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8184 - val_accuracy: 0.7656
Epoch 108/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8185 - val_accuracy: 0.7656
Epoch 109/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8185 - val_accuracy: 0.7656
Epoch 110/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8186 - val_accuracy: 0.7656
Epoch 111/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8186 - val_accuracy: 0.7656
Epoch 112/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0431 - accuracy: 0.9948 - val_loss: 0.8187 - val_accuracy: 0.7656
Epoch 113/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8187 - val_accuracy: 0.7656

Epoch 00113: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 114/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 115/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 116/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8188 - val_accuracy: 0.7656
Epoch 117/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 118/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 119/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8189 - val_accuracy: 0.7656
Epoch 120/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 121/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 122/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 123/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656

Epoch 00123: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 124/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8190 - val_accuracy: 0.7656
Epoch 125/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 126/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 127/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 128/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0430 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 129/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 130/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 131/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8191 - val_accuracy: 0.7656
Epoch 132/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 133/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656

Epoch 00133: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 134/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 135/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 136/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 137/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 138/2000
191/191 [==============================] - 0s 47us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 139/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 140/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 141/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 142/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656
Epoch 143/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8192 - val_accuracy: 0.7656

Epoch 00143: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 144/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 145/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 146/2000
191/191 [==============================] - 0s 52us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 147/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 148/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 149/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 150/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 151/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 152/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 153/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00153: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 154/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 155/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 156/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 157/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 158/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 159/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 160/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 161/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 162/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 163/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00163: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 164/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 165/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 166/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 167/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 168/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 169/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 170/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 171/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 172/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 173/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00173: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 174/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 175/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 176/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 177/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 178/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 179/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 180/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 181/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 182/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 183/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00183: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 184/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 185/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 186/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 187/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 188/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 189/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 190/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 191/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 192/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 193/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00193: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 194/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 195/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 196/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 197/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 198/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 199/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 200/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 201/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 202/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 203/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00203: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 204/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 205/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 206/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 207/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 208/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 209/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 210/2000
191/191 [==============================] - ETA: 0s - loss: 0.0166 - accuracy: 1.00 - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 211/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 212/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 213/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00213: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 214/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 215/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 216/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 217/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 218/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 219/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 220/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 221/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 222/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 223/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00223: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 224/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 225/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 226/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 227/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 228/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 229/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 230/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 231/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 232/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 233/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00233: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 234/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 235/2000
191/191 [==============================] - ETA: 0s - loss: 0.0224 - accuracy: 1.00 - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 236/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 237/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 238/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 239/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 240/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 241/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 242/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 243/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00243: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 244/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 245/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 246/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 247/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 248/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 249/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 250/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 251/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 252/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 253/2000
191/191 [==============================] - ETA: 0s - loss: 0.0256 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00253: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 254/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 255/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 256/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 257/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 258/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 259/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 260/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 261/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 262/2000
191/191 [==============================] - ETA: 0s - loss: 0.0223 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 263/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00263: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 264/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 265/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 266/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 267/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 268/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 269/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 270/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 271/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 272/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 273/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00273: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 274/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 275/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 276/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 277/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 278/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 279/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 280/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 281/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 282/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 283/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00283: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 284/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 285/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 286/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 287/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 288/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 289/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 290/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 291/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 292/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 293/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00293: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 294/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 295/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 296/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 297/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 298/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 299/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 300/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 301/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 302/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 303/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00303: ReduceLROnPlateau reducing learning rate to 9.313225537987968e-12.
Epoch 304/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 305/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 306/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 307/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 308/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 309/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 310/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 311/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 312/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 313/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00313: ReduceLROnPlateau reducing learning rate to 4.656612768993984e-12.
Epoch 314/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 315/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 316/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 317/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 318/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 319/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 320/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 321/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 322/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 323/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00323: ReduceLROnPlateau reducing learning rate to 2.328306384496992e-12.
Epoch 324/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 325/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 326/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 327/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 328/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 329/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 330/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 331/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 332/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 333/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00333: ReduceLROnPlateau reducing learning rate to 1.164153192248496e-12.
Epoch 334/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 335/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 336/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 337/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 338/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 339/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 340/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 341/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 342/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 343/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00343: ReduceLROnPlateau reducing learning rate to 5.82076596124248e-13.
Epoch 344/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 345/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 346/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 347/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 348/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 349/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 350/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 351/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 352/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 353/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00353: ReduceLROnPlateau reducing learning rate to 2.91038298062124e-13.
Epoch 354/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 355/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 356/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 357/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 358/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 359/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 360/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 361/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 362/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 363/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00363: ReduceLROnPlateau reducing learning rate to 1.45519149031062e-13.
Epoch 364/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 365/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 366/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 367/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 368/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 369/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 370/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 371/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 372/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 373/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00373: ReduceLROnPlateau reducing learning rate to 7.2759574515531e-14.
Epoch 374/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 375/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 376/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 377/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 378/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 379/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 380/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 381/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 382/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 383/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00383: ReduceLROnPlateau reducing learning rate to 3.63797872577655e-14.
Epoch 384/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 385/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 386/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 387/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 388/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 389/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 390/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 391/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 392/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 393/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00393: ReduceLROnPlateau reducing learning rate to 1.818989362888275e-14.
Epoch 394/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 395/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 396/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 397/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 398/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 399/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 400/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 401/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 402/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 403/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00403: ReduceLROnPlateau reducing learning rate to 9.094946814441375e-15.
Epoch 404/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 405/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 406/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 407/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 408/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 409/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 410/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 411/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 412/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 413/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00413: ReduceLROnPlateau reducing learning rate to 4.5474734072206875e-15.
Epoch 414/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 415/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 416/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 417/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 418/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 419/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 420/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 421/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 422/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 423/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00423: ReduceLROnPlateau reducing learning rate to 2.2737367036103438e-15.
Epoch 424/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 425/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 426/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 427/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 428/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 429/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 430/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 431/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 432/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 433/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00433: ReduceLROnPlateau reducing learning rate to 1.1368683518051719e-15.
Epoch 434/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 435/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 436/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 437/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 438/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 439/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 440/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 441/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 442/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 443/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00443: ReduceLROnPlateau reducing learning rate to 5.684341759025859e-16.
Epoch 444/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 445/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 446/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 447/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 448/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 449/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 450/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 451/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 452/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 453/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00453: ReduceLROnPlateau reducing learning rate to 2.8421708795129297e-16.
Epoch 454/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 455/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 456/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 457/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 458/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 459/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 460/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 461/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 462/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 463/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00463: ReduceLROnPlateau reducing learning rate to 1.4210854397564648e-16.
Epoch 464/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 465/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 466/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 467/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 468/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 469/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 470/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 471/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 472/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 473/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00473: ReduceLROnPlateau reducing learning rate to 7.105427198782324e-17.
Epoch 474/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 475/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 476/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 477/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 478/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 479/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 480/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 481/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 482/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 483/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00483: ReduceLROnPlateau reducing learning rate to 3.552713599391162e-17.
Epoch 484/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 485/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 486/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 487/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 488/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 489/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 490/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 491/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 492/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 493/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00493: ReduceLROnPlateau reducing learning rate to 1.776356799695581e-17.
Epoch 494/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 495/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 496/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 497/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 498/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 499/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 500/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 501/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 502/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 503/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00503: ReduceLROnPlateau reducing learning rate to 8.881783998477905e-18.
Epoch 504/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 505/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 506/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 507/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 508/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 509/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 510/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 511/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 512/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 513/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00513: ReduceLROnPlateau reducing learning rate to 4.440891999238953e-18.
Epoch 514/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 515/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 516/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 517/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 518/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 519/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 520/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 521/2000
191/191 [==============================] - 0s 141us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 522/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 523/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00523: ReduceLROnPlateau reducing learning rate to 2.2204459996194763e-18.
Epoch 524/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 525/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 526/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 527/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 528/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 529/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 530/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 531/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 532/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 533/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00533: ReduceLROnPlateau reducing learning rate to 1.1102229998097382e-18.
Epoch 534/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 535/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 536/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 537/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 538/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 539/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 540/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 541/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 542/2000
191/191 [==============================] - ETA: 0s - loss: 0.0396 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 543/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00543: ReduceLROnPlateau reducing learning rate to 5.551114999048691e-19.
Epoch 544/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 545/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 546/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 547/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 548/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 549/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 550/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 551/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 552/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 553/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00553: ReduceLROnPlateau reducing learning rate to 2.7755574995243454e-19.
Epoch 554/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 555/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 556/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 557/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 558/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 559/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 560/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 561/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 562/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 563/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00563: ReduceLROnPlateau reducing learning rate to 1.3877787497621727e-19.
Epoch 564/2000
191/191 [==============================] - 0s 303us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 565/2000
191/191 [==============================] - 0s 288us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 566/2000
191/191 [==============================] - 0s 194us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 567/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 568/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 569/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 570/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 571/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 572/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 573/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00573: ReduceLROnPlateau reducing learning rate to 6.938893748810864e-20.
Epoch 574/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 575/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 576/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 577/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 578/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 579/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 580/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 581/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 582/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 583/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00583: ReduceLROnPlateau reducing learning rate to 3.469446874405432e-20.
Epoch 584/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 585/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 586/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 587/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 588/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 589/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 590/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 591/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 592/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 593/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00593: ReduceLROnPlateau reducing learning rate to 1.734723437202716e-20.
Epoch 594/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 595/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 596/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 597/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 598/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 599/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 600/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 601/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 602/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 603/2000
191/191 [==============================] - 0s 173us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00603: ReduceLROnPlateau reducing learning rate to 8.67361718601358e-21.
Epoch 604/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 605/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 606/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 607/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 608/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 609/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 610/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 611/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 612/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 613/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00613: ReduceLROnPlateau reducing learning rate to 4.33680859300679e-21.
Epoch 614/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 615/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 616/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 617/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 618/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 619/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 620/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 621/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 622/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 623/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00623: ReduceLROnPlateau reducing learning rate to 2.168404296503395e-21.
Epoch 624/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 625/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 626/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 627/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 628/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 629/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 630/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 631/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 632/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 633/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00633: ReduceLROnPlateau reducing learning rate to 1.0842021482516974e-21.
Epoch 634/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 635/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 636/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 637/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 638/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 639/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 640/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 641/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 642/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 643/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00643: ReduceLROnPlateau reducing learning rate to 5.421010741258487e-22.
Epoch 644/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 645/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 646/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 647/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 648/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 649/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 650/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 651/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 652/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 653/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00653: ReduceLROnPlateau reducing learning rate to 2.7105053706292436e-22.
Epoch 654/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 655/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 656/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 657/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 658/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 659/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 660/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 661/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 662/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 663/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00663: ReduceLROnPlateau reducing learning rate to 1.3552526853146218e-22.
Epoch 664/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 665/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 666/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 667/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 668/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 669/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 670/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 671/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 672/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 673/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00673: ReduceLROnPlateau reducing learning rate to 6.776263426573109e-23.
Epoch 674/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 675/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 676/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 677/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 678/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 679/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 680/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 681/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 682/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 683/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00683: ReduceLROnPlateau reducing learning rate to 3.3881317132865545e-23.
Epoch 684/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 685/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 686/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 687/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 688/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 689/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 690/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 691/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 692/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 693/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00693: ReduceLROnPlateau reducing learning rate to 1.6940658566432772e-23.
Epoch 694/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 695/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 696/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 697/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 698/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 699/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 700/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 701/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 702/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 703/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00703: ReduceLROnPlateau reducing learning rate to 8.470329283216386e-24.
Epoch 704/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 705/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 706/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 707/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 708/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 709/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 710/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 711/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 712/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 713/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00713: ReduceLROnPlateau reducing learning rate to 4.235164641608193e-24.
Epoch 714/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 715/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 716/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 717/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 718/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 719/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 720/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 721/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 722/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 723/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00723: ReduceLROnPlateau reducing learning rate to 2.1175823208040965e-24.
Epoch 724/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 725/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 726/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 727/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 728/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 729/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 730/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 731/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 732/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 733/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00733: ReduceLROnPlateau reducing learning rate to 1.0587911604020483e-24.
Epoch 734/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 735/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 736/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 737/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 738/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 739/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 740/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 741/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 742/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 743/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00743: ReduceLROnPlateau reducing learning rate to 5.293955802010241e-25.
Epoch 744/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 745/2000
191/191 [==============================] - ETA: 0s - loss: 0.0317 - accuracy: 1.00 - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 746/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 747/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 748/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 749/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 750/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 751/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 752/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 753/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00753: ReduceLROnPlateau reducing learning rate to 2.6469779010051207e-25.
Epoch 754/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 755/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 756/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 757/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 758/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 759/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 760/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 761/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 762/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 763/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00763: ReduceLROnPlateau reducing learning rate to 1.3234889505025603e-25.
Epoch 764/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 765/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 766/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 767/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 768/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 769/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 770/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 771/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 772/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 773/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00773: ReduceLROnPlateau reducing learning rate to 6.617444752512802e-26.
Epoch 774/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 775/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 776/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 777/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 778/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 779/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 780/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 781/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 782/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 783/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00783: ReduceLROnPlateau reducing learning rate to 3.308722376256401e-26.
Epoch 784/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 785/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 786/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 787/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 788/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 789/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 790/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 791/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 792/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 793/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00793: ReduceLROnPlateau reducing learning rate to 1.6543611881282004e-26.
Epoch 794/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 795/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 796/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 797/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 798/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 799/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 800/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 801/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 802/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 803/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00803: ReduceLROnPlateau reducing learning rate to 8.271805940641002e-27.
Epoch 804/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 805/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 806/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 807/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 808/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 809/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 810/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 811/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 812/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 813/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00813: ReduceLROnPlateau reducing learning rate to 4.135902970320501e-27.
Epoch 814/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 815/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 816/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 817/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 818/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 819/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 820/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 821/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 822/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 823/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00823: ReduceLROnPlateau reducing learning rate to 2.0679514851602505e-27.
Epoch 824/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 825/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 826/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 827/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 828/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 829/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 830/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 831/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 832/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 833/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00833: ReduceLROnPlateau reducing learning rate to 1.0339757425801253e-27.
Epoch 834/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 835/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 836/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 837/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 838/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 839/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 840/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 841/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 842/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 843/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00843: ReduceLROnPlateau reducing learning rate to 5.169878712900626e-28.
Epoch 844/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 845/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 846/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 847/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 848/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 849/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 850/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 851/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 852/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 853/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00853: ReduceLROnPlateau reducing learning rate to 2.584939356450313e-28.
Epoch 854/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 855/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 856/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 857/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 858/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 859/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 860/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 861/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 862/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 863/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00863: ReduceLROnPlateau reducing learning rate to 1.2924696782251566e-28.
Epoch 864/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 865/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 866/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 867/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 868/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 869/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 870/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 871/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 872/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 873/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00873: ReduceLROnPlateau reducing learning rate to 6.462348391125783e-29.
Epoch 874/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 875/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 876/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 877/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 878/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 879/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 880/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 881/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 882/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 883/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00883: ReduceLROnPlateau reducing learning rate to 3.2311741955628914e-29.
Epoch 884/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 885/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 886/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 887/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 888/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 889/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 890/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 891/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 892/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 893/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00893: ReduceLROnPlateau reducing learning rate to 1.6155870977814457e-29.
Epoch 894/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 895/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 896/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 897/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 898/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 899/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 900/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 901/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 902/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 903/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00903: ReduceLROnPlateau reducing learning rate to 8.077935488907229e-30.
Epoch 904/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 905/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 906/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 907/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 908/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 909/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 910/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 911/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 912/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 913/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00913: ReduceLROnPlateau reducing learning rate to 4.038967744453614e-30.
Epoch 914/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 915/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 916/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 917/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 918/2000
191/191 [==============================] - ETA: 0s - loss: 0.0296 - accuracy: 1.00 - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 919/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 920/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 921/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 922/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 923/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00923: ReduceLROnPlateau reducing learning rate to 2.019483872226807e-30.
Epoch 924/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 925/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 926/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 927/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 928/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 929/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 930/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 931/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 932/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 933/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00933: ReduceLROnPlateau reducing learning rate to 1.0097419361134036e-30.
Epoch 934/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 935/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 936/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 937/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 938/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 939/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 940/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 941/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 942/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 943/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00943: ReduceLROnPlateau reducing learning rate to 5.048709680567018e-31.
Epoch 944/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 945/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 946/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 947/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 948/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 949/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 950/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 951/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 952/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 953/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00953: ReduceLROnPlateau reducing learning rate to 2.524354840283509e-31.
Epoch 954/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 955/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 956/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 957/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 958/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 959/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 960/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 961/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 962/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 963/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00963: ReduceLROnPlateau reducing learning rate to 1.2621774201417545e-31.
Epoch 964/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 965/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 966/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 967/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 968/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 969/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 970/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 971/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 972/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 973/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00973: ReduceLROnPlateau reducing learning rate to 6.310887100708772e-32.
Epoch 974/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 975/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 976/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 977/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 978/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 979/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 980/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 981/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 982/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 983/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00983: ReduceLROnPlateau reducing learning rate to 3.155443550354386e-32.
Epoch 984/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 985/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 986/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 987/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 988/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 989/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 990/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 991/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 992/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 993/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 00993: ReduceLROnPlateau reducing learning rate to 1.577721775177193e-32.
Epoch 994/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 995/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 996/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 997/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 998/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 999/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1000/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1001/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1002/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1003/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01003: ReduceLROnPlateau reducing learning rate to 7.888608875885965e-33.
Epoch 1004/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1005/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1006/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1007/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1008/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1009/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1010/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1011/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1012/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1013/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01013: ReduceLROnPlateau reducing learning rate to 3.944304437942983e-33.
Epoch 1014/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1015/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1016/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1017/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1018/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1019/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1020/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1021/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1022/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1023/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01023: ReduceLROnPlateau reducing learning rate to 1.9721522189714914e-33.
Epoch 1024/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1025/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1026/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1027/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1028/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1029/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1030/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1031/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1032/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1033/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01033: ReduceLROnPlateau reducing learning rate to 9.860761094857457e-34.
Epoch 1034/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1035/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1036/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1037/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1038/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1039/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1040/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1041/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1042/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1043/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01043: ReduceLROnPlateau reducing learning rate to 4.930380547428728e-34.
Epoch 1044/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1045/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1046/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1047/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1048/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1049/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1050/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1051/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1052/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1053/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01053: ReduceLROnPlateau reducing learning rate to 2.465190273714364e-34.
Epoch 1054/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1055/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1056/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1057/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1058/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1059/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1060/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1061/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1062/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1063/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01063: ReduceLROnPlateau reducing learning rate to 1.232595136857182e-34.
Epoch 1064/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1065/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1066/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1067/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1068/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1069/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1070/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1071/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1072/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1073/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01073: ReduceLROnPlateau reducing learning rate to 6.16297568428591e-35.
Epoch 1074/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1075/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1076/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1077/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1078/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1079/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1080/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1081/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1082/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1083/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01083: ReduceLROnPlateau reducing learning rate to 3.081487842142955e-35.
Epoch 1084/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1085/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1086/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1087/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1088/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1089/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1090/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1091/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1092/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1093/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01093: ReduceLROnPlateau reducing learning rate to 1.5407439210714776e-35.
Epoch 1094/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1095/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1096/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1097/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1098/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1099/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1100/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1101/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1102/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1103/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01103: ReduceLROnPlateau reducing learning rate to 7.703719605357388e-36.
Epoch 1104/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1105/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1106/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1107/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1108/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1109/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1110/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1111/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1112/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1113/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01113: ReduceLROnPlateau reducing learning rate to 3.851859802678694e-36.
Epoch 1114/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1115/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1116/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1117/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1118/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1119/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1120/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1121/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1122/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1123/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01123: ReduceLROnPlateau reducing learning rate to 1.925929901339347e-36.
Epoch 1124/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1125/2000
191/191 [==============================] - ETA: 0s - loss: 0.0433 - accuracy: 1.00 - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1126/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1127/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1128/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1129/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1130/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1131/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1132/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1133/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01133: ReduceLROnPlateau reducing learning rate to 9.629649506696735e-37.
Epoch 1134/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1135/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1136/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1137/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1138/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1139/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1140/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1141/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1142/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1143/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01143: ReduceLROnPlateau reducing learning rate to 4.8148247533483676e-37.
Epoch 1144/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1145/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1146/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1147/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1148/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1149/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1150/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1151/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1152/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1153/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01153: ReduceLROnPlateau reducing learning rate to 2.4074123766741838e-37.
Epoch 1154/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1155/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1156/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1157/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1158/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1159/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1160/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1161/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1162/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1163/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01163: ReduceLROnPlateau reducing learning rate to 1.2037061883370919e-37.
Epoch 1164/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1165/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1166/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1167/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1168/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1169/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1170/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1171/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1172/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1173/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01173: ReduceLROnPlateau reducing learning rate to 6.018530941685459e-38.
Epoch 1174/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1175/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1176/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1177/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1178/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1179/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1180/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1181/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1182/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1183/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01183: ReduceLROnPlateau reducing learning rate to 3.0092654708427297e-38.
Epoch 1184/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1185/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1186/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1187/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1188/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1189/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1190/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1191/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1192/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1193/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01193: ReduceLROnPlateau reducing learning rate to 1.5046327354213649e-38.
Epoch 1194/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1195/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1196/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1197/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1198/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1199/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1200/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1201/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1202/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1203/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01203: ReduceLROnPlateau reducing learning rate to 7.523163677106824e-39.
Epoch 1204/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1205/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1206/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1207/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1208/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1209/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1210/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1211/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1212/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1213/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01213: ReduceLROnPlateau reducing learning rate to 3.761581838553412e-39.
Epoch 1214/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1215/2000
191/191 [==============================] - ETA: 0s - loss: 0.0311 - accuracy: 1.00 - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1216/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1217/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1218/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1219/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1220/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1221/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1222/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1223/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01223: ReduceLROnPlateau reducing learning rate to 1.88079056895209e-39.
Epoch 1224/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1225/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1226/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1227/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1228/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1229/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1230/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1231/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1232/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1233/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01233: ReduceLROnPlateau reducing learning rate to 9.40395284476045e-40.
Epoch 1234/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1235/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1236/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1237/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1238/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1239/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1240/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1241/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1242/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1243/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01243: ReduceLROnPlateau reducing learning rate to 4.701972919134064e-40.
Epoch 1244/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1245/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1246/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1247/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1248/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1249/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1250/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1251/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1252/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1253/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01253: ReduceLROnPlateau reducing learning rate to 2.350986459567032e-40.
Epoch 1254/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1255/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1256/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1257/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1258/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1259/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1260/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1261/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1262/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1263/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01263: ReduceLROnPlateau reducing learning rate to 1.175493229783516e-40.
Epoch 1264/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1265/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1266/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1267/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1268/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1269/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1270/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1271/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1272/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1273/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01273: ReduceLROnPlateau reducing learning rate to 5.87746614891758e-41.
Epoch 1274/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1275/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1276/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1277/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1278/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1279/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1280/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1281/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1282/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1283/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01283: ReduceLROnPlateau reducing learning rate to 2.93873307445879e-41.
Epoch 1284/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1285/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1286/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1287/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1288/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1289/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1290/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1291/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1292/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1293/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01293: ReduceLROnPlateau reducing learning rate to 1.4694015696910032e-41.
Epoch 1294/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1295/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1296/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1297/2000
191/191 [==============================] - ETA: 0s - loss: 0.0313 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1298/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1299/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1300/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1301/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1302/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1303/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01303: ReduceLROnPlateau reducing learning rate to 7.347007848455016e-42.
Epoch 1304/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1305/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1306/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1307/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1308/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1309/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1310/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1311/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1312/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1313/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01313: ReduceLROnPlateau reducing learning rate to 3.673503924227508e-42.
Epoch 1314/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1315/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1316/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1317/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1318/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1319/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1320/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1321/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1322/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1323/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01323: ReduceLROnPlateau reducing learning rate to 1.8371022867298352e-42.
Epoch 1324/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1325/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1326/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1327/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1328/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1329/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1330/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1331/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1332/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1333/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01333: ReduceLROnPlateau reducing learning rate to 9.185511433649176e-43.
Epoch 1334/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1335/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1336/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1337/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1338/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1339/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1340/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1341/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1342/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1343/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01343: ReduceLROnPlateau reducing learning rate to 4.5962589629854e-43.
Epoch 1344/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1345/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1346/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1347/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1348/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1349/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1350/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1351/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1352/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1353/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01353: ReduceLROnPlateau reducing learning rate to 2.2981294814927e-43.
Epoch 1354/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1355/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1356/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1357/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1358/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1359/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1360/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1361/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1362/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1363/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01363: ReduceLROnPlateau reducing learning rate to 1.14906474074635e-43.
Epoch 1364/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1365/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1366/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1367/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1368/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1369/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1370/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1371/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1372/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1373/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01373: ReduceLROnPlateau reducing learning rate to 5.74532370373175e-44.
Epoch 1374/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1375/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1376/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1377/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1378/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1379/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1380/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1381/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1382/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1383/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01383: ReduceLROnPlateau reducing learning rate to 2.872661851865875e-44.
Epoch 1384/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1385/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1386/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1387/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1388/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1389/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1390/2000
191/191 [==============================] - ETA: 0s - loss: 0.0479 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1391/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1392/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1393/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01393: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-44.
Epoch 1394/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1395/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1396/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1397/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1398/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1399/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1400/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1401/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1402/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1403/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01403: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-45.
Epoch 1404/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1405/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1406/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1407/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1408/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1409/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1410/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1411/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1412/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1413/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01413: ReduceLROnPlateau reducing learning rate to 3.5032461608120427e-45.
Epoch 1414/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1415/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1416/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1417/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1418/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1419/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1420/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1421/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1422/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1423/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01423: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1424/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1425/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1426/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1427/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1428/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1429/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1430/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1431/2000
191/191 [==============================] - ETA: 0s - loss: 0.0420 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1432/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1433/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656

Epoch 01433: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1434/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1435/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1436/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1437/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1438/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1439/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1440/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1441/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1442/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1443/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1444/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1445/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1446/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1447/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1448/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1449/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1450/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1451/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1452/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1453/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1454/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1455/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1456/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1457/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1458/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1459/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1460/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1461/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1462/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1463/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1464/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1465/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1466/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1467/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1468/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1469/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1470/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1471/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1472/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1473/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1474/2000
191/191 [==============================] - ETA: 0s - loss: 0.0491 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1475/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1476/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1477/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1478/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1479/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1480/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1481/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1482/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1483/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1484/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1485/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1486/2000
191/191 [==============================] - 0s 147us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1487/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1488/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1489/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1490/2000
191/191 [==============================] - 0s 114us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1491/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1492/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1493/2000
191/191 [==============================] - 0s 109us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1494/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1495/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1496/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1497/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1498/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1499/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1500/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1501/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1502/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1503/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1504/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1505/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1506/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1507/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1508/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1509/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1510/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1511/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1512/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1513/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1514/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1515/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1516/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1517/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1518/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1519/2000
191/191 [==============================] - 0s 95us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1520/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1521/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1522/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1523/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1524/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1525/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1526/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1527/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1528/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1529/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1530/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1531/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1532/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1533/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1534/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1535/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1536/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1537/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1538/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1539/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1540/2000
191/191 [==============================] - ETA: 0s - loss: 0.0384 - accuracy: 1.00 - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1541/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1542/2000
191/191 [==============================] - 0s 188us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1543/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1544/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1545/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1546/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1547/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1548/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1549/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1550/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1551/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1552/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1553/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1554/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1555/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1556/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1557/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1558/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1559/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1560/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1561/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1562/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1563/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1564/2000
191/191 [==============================] - ETA: 0s - loss: 0.0228 - accuracy: 1.00 - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1565/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1566/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1567/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1568/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1569/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1570/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1571/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1572/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1573/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1574/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1575/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1576/2000
191/191 [==============================] - 0s 111us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1577/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1578/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1579/2000
191/191 [==============================] - 0s 102us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1580/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1581/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1582/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1583/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1584/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1585/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1586/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1587/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1588/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1589/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1590/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1591/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1592/2000
191/191 [==============================] - 0s 101us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1593/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1594/2000
191/191 [==============================] - 0s 112us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1595/2000
191/191 [==============================] - 0s 106us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1596/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1597/2000
191/191 [==============================] - ETA: 0s - loss: 0.0252 - accuracy: 1.00 - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1598/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1599/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1600/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1601/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1602/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1603/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1604/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1605/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1606/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1607/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1608/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1609/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1610/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1611/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1612/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1613/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1614/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1615/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1616/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1617/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1618/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1619/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1620/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1621/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1622/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1623/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1624/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1625/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1626/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1627/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1628/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1629/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1630/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1631/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1632/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1633/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1634/2000
191/191 [==============================] - 0s 126us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1635/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1636/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1637/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1638/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1639/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1640/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1641/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1642/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1643/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1644/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1645/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1646/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1647/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1648/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1649/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1650/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1651/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1652/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1653/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1654/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1655/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1656/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1657/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1658/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1659/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1660/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1661/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1662/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1663/2000
191/191 [==============================] - 0s 152us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1664/2000
191/191 [==============================] - 0s 136us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1665/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1666/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1667/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1668/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1669/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1670/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1671/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1672/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1673/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1674/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1675/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1676/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1677/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1678/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1679/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1680/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1681/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1682/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1683/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1684/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1685/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1686/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1687/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1688/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1689/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1690/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1691/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1692/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1693/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1694/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1695/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1696/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1697/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1698/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1699/2000
191/191 [==============================] - 0s 131us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1700/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1701/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1702/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1703/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1704/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1705/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1706/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1707/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1708/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1709/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1710/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1711/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1712/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1713/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1714/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1715/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1716/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1717/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1718/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1719/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1720/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1721/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1722/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1723/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1724/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1725/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1726/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1727/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1728/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1729/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1730/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1731/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1732/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1733/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1734/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1735/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1736/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1737/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1738/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1739/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1740/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1741/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1742/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1743/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1744/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1745/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1746/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1747/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1748/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1749/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1750/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1751/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1752/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1753/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1754/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1755/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1756/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1757/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1758/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1759/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1760/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1761/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1762/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1763/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1764/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1765/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1766/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1767/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1768/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1769/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1770/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1771/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1772/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1773/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1774/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1775/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1776/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1777/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1778/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1779/2000
191/191 [==============================] - 0s 105us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1780/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1781/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1782/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1783/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1784/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1785/2000
191/191 [==============================] - 0s 120us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1786/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1787/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1788/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1789/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1790/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1791/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1792/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1793/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1794/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1795/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1796/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1797/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1798/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1799/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1800/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1801/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1802/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1803/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1804/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1805/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1806/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1807/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1808/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1809/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1810/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1811/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1812/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1813/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1814/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1815/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1816/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1817/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1818/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1819/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1820/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1821/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1822/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1823/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1824/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1825/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1826/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1827/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1828/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1829/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1830/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1831/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1832/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1833/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1834/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1835/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1836/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1837/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1838/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1839/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1840/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1841/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1842/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1843/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1844/2000
191/191 [==============================] - 0s 115us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1845/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1846/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1847/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1848/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1849/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1850/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1851/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1852/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1853/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1854/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1855/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1856/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1857/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1858/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1859/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1860/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1861/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1862/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1863/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1864/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1865/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1866/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1867/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1868/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1869/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1870/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1871/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1872/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1873/2000
191/191 [==============================] - ETA: 0s - loss: 0.0489 - accuracy: 1.00 - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1874/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1875/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1876/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1877/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1878/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1879/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1880/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1881/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1882/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1883/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1884/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1885/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1886/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1887/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1888/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1889/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1890/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1891/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1892/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1893/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1894/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1895/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1896/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1897/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1898/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1899/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1900/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1901/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1902/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1903/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1904/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1905/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1906/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1907/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1908/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1909/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1910/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1911/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1912/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1913/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1914/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1915/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1916/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1917/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1918/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1919/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1920/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1921/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1922/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1923/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1924/2000
191/191 [==============================] - 0s 79us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1925/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1926/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1927/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1928/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1929/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1930/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1931/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1932/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1933/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1934/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1935/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1936/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1937/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1938/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1939/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1940/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1941/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1942/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1943/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1944/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1945/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1946/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1947/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1948/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1949/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1950/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1951/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1952/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1953/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1954/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1955/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1956/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1957/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1958/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1959/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1960/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1961/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1962/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1963/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1964/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1965/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1966/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1967/2000
191/191 [==============================] - 0s 58us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1968/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1969/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1970/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1971/2000
191/191 [==============================] - ETA: 0s - loss: 0.0400 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1972/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1973/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1974/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1975/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1976/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1977/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1978/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1979/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1980/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1981/2000
191/191 [==============================] - 0s 110us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1982/2000
191/191 [==============================] - 0s 99us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1983/2000
191/191 [==============================] - ETA: 0s - loss: 0.0319 - accuracy: 1.00 - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1984/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1985/2000
191/191 [==============================] - 0s 73us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1986/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1987/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1988/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1989/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1990/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1991/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1992/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1993/2000
191/191 [==============================] - 0s 68us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1994/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1995/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1996/2000
191/191 [==============================] - 0s 94us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1997/2000
191/191 [==============================] - 0s 63us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1998/2000
191/191 [==============================] - 0s 78us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 1999/2000
191/191 [==============================] - 0s 89us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
Epoch 2000/2000
191/191 [==============================] - 0s 84us/step - loss: 0.0429 - accuracy: 0.9948 - val_loss: 0.8193 - val_accuracy: 0.7656
In [57]:
# Pull the per-epoch metric curves recorded by model.fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

# Accuracy: training vs. validation per epoch.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss: training vs. validation per epoch.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 2000)
In [58]:
# Evaluate the trained network on the held-out test split.
eval_results = model.evaluate(X_test, y_test)
test_loss, test_acc = eval_results
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
64/64 [==============================] - 0s 47us/step
test loss: 0.8193266093730927, test accuracy: 0.765625
In [64]:
# Score the test set and report ranking quality via ROC AUC.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.7282608695652174
In [65]:
# Threshold the predicted probabilities at 0.5 to get hard labels,
# then measure chance-corrected agreement with Cohen's kappa.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
Kappa:  0.366754617414248

KMeans

In [66]:
X
Out[66]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782
... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307

255 rows × 13 columns

In [67]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14,
# each fit with the same fixed random_state for reproducibility.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[67]:
[3315.0,
 2972.7888695817974,
 2748.18187155972,
 2544.9420084212106,
 2413.687059384553,
 2278.037996783226,
 2213.3487507256823,
 2123.4282707474663,
 2067.8299633414163,
 1977.777252698108,
 1956.5229777214513,
 1880.0296166971755,
 1815.5096049846275,
 1785.9955747862728]
In [68]:
# Plot the elbow curve so the bend (chosen k) is readable; labels added
# and trailing ';' suppresses the bare Line2D repr in the cell output.
plt.figure(figsize=(12, 12))
plt.plot(range(1, 15), WSSs, marker='o')
plt.title('Elbow curve for KMeans')
plt.xlabel('Number of clusters k')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show();
Out[68]:
[<matplotlib.lines.Line2D at 0x1e82ae84f98>]

Based on the elbow curve above, we choose K = 6.

In [69]:
# Fit KMeans with the K selected from the elbow curve; .fit() returns the
# estimator itself, so displaying it shows the fitted configuration.
kmeans_ch = KMeans(n_clusters=6, n_init=10, random_state=0).fit(X)
kmeans_ch
Out[69]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=6, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [70]:
kmeans_ch.labels_
Out[70]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [71]:
# Predict cluster membership for X. Since X is the same data the model
# was fitted on, this reproduces kmeans_ch.labels_ (compare the outputs).
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[71]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [72]:
# Attach the cluster assignment and the ground-truth target to the
# feature frame. NOTE: this mutates X in place, so every later cell
# (and re-display of X) sees the two extra columns.
X['Cluster'] = clusters_ch
X['chosen'] = list(y)
In [73]:
X
Out[73]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600 4 0
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678 2 0
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094 2 0
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898 2 0
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348 0 0
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512 1 0
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271 1 0
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697 0 0
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074 1 0
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167 4 0
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114 4 0
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874 4 0
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366 1 0
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900 4 0
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454 4 0
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188 3 0
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204 4 0
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885 4 0
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785 4 0
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391 4 0
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813 4 0
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790 0 0
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849 0 0
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126 4 0
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925 4 0
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850 4 0
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969 4 0
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105 3 0
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840 4 0
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782 3 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440 1 1
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084 0 1
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654 1 1
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530 2 1
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469 2 1
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971 3 1
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456 0 1
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198 4 1
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858 2 1
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730 2 1
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517 4 1
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789 0 1
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825 2 1
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166 2 1
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763 2 1
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182 1 1
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561 3 1
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069 4 1
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656 4 1
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744 2 1
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849 2 1
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814 2 1
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479 4 1
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237 0 1
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216 0 1
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556 2 1
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679 2 1
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201 2 1
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004 2 1
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307 2 1

255 rows × 15 columns

In [74]:
# Count samples per (chosen, Cluster) pair, reshape to one row per
# cluster with a column per 'chosen' value, and draw a stacked bar chart
# showing the class composition of each cluster.
cluster_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
cluster_pivot = cluster_counts.pivot(index='Cluster', columns='chosen', values=0)
cluster_pivot.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[74]:
<matplotlib.axes._subplots.AxesSubplot at 0x1e82aec1940>
In [ ]:
 
In [ ]:
 
In [ ]: